* [dpdk-dev] [PATCH 19.08] net/pcap: enable infinitely rxing a pcap file
@ 2019-04-11 11:00 Cian Ferriter
From: Cian Ferriter @ 2019-04-11 11:00 UTC (permalink / raw)
To: Ferruh Yigit, Bruce Richardson, John McNamara, Marko Kovacevic
Cc: dev, Cian Ferriter
It can be useful to use pcap files for some rudimentary performance
testing. This patch enables this functionality in the pcap driver.
At a high level, this works by creating a ring of sufficient size to
store the packets in the pcap file passed to the application. When the
rx function for this mode is called, packets are dequeued from the ring
for use by the application and also enqueued back on to the ring to be
"received" again.
A tx_drop mode is also added since transmitting to a tx_pcap file isn't
desirable at a high traffic rate.
Jumbo frames are not supported in this mode. When filling the ring at rx
queue setup time, the presence of multi-segment mbufs is checked for.
The PMD will exit on detection of these multi-segment mbufs.
Signed-off-by: Cian Ferriter <cian.ferriter@intel.com>
---
doc/guides/nics/pcap_ring.rst | 15 +++
drivers/net/pcap/rte_eth_pcap.c | 209 ++++++++++++++++++++++++++++++--
2 files changed, 215 insertions(+), 9 deletions(-)
diff --git a/doc/guides/nics/pcap_ring.rst b/doc/guides/nics/pcap_ring.rst
index c1ef9196b..45f55a345 100644
--- a/doc/guides/nics/pcap_ring.rst
+++ b/doc/guides/nics/pcap_ring.rst
@@ -106,6 +106,21 @@ Runtime Config Options
--vdev 'net_pcap0,iface=eth0,phy_mac=1'
+- Use the RX PCAP file to infinitely receive packets
+
+ In case the ``rx_pcap=`` configuration is set, the user may want to use the selected PCAP file for rudimentary
+ performance testing. This can be done with the ``devarg`` ``infinite_rx``, for example::
+
+ --vdev 'net_pcap0,rx_pcap=file_rx.pcap,infinite_rx=1,tx_drop=1'
+
+ When this mode is used, it is recommended to use the ``tx_drop`` ``devarg``.
+
+- Drop all packets on transmit
+
+ The user may want to drop all packets on tx for a device. This can be done with the ``tx_drop`` ``devarg``, for example::
+
+ --vdev 'net_pcap0,rx_pcap=file_rx.pcap,tx_drop=1'
+
Examples of Usage
^^^^^^^^^^^^^^^^^
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 353538f16..b72db973a 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -39,6 +39,8 @@
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG "iface"
#define ETH_PCAP_PHY_MAC_ARG "phy_mac"
+#define ETH_PCAP_INFINITE_RX_ARG "infinite_rx"
+#define ETH_PCAP_TX_DROP_ARG "tx_drop"
#define ETH_PCAP_ARG_MAXLEN 64
@@ -64,6 +66,9 @@ struct pcap_rx_queue {
struct queue_stat rx_stat;
char name[PATH_MAX];
char type[ETH_PCAP_ARG_MAXLEN];
+
+ /* Contains pre-generated packets, to be looped through */
+ struct rte_ring *pkts;
};
struct pcap_tx_queue {
@@ -82,6 +87,7 @@ struct pmd_internals {
int if_index;
int single_iface;
int phy_mac;
+ int infinite_rx;
};
struct pmd_process_private {
@@ -109,6 +115,8 @@ static const char *valid_arguments[] = {
ETH_PCAP_TX_IFACE_ARG,
ETH_PCAP_IFACE_ARG,
ETH_PCAP_PHY_MAC_ARG,
+ ETH_PCAP_INFINITE_RX_ARG,
+ ETH_PCAP_TX_DROP_ARG,
NULL
};
@@ -178,6 +186,40 @@ eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
}
}
+static uint16_t
+eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ int i;
+ struct pcap_rx_queue *pcap_q = queue;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0)
+ return 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *pcap_buf;
+ int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);
+ if (err)
+ rte_panic("failed to dequeue from pcap pkts ring\n");
+
+ rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *),
+ rte_pktmbuf_mtod(pcap_buf, void *),
+ pcap_buf->data_len);
+ bufs[i]->data_len = pcap_buf->data_len;
+ bufs[i]->pkt_len = pcap_buf->pkt_len;
+ bufs[i]->port = pcap_q->port_id;
+
+ /* enqueue packet back on ring to allow infinite rx */
+ rte_ring_enqueue(pcap_q->pkts, pcap_buf);
+ }
+
+ pcap_q->rx_stat.pkts += i;
+
+ return i;
+}
+
static uint16_t
eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
@@ -320,6 +362,26 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
return num_tx;
}
+/*
+ * Callback to handle dropping packets in the infinite rx case.
+ */
+static uint16_t
+eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ unsigned int i;
+ struct pcap_tx_queue *tx_queue = queue;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ for (i = 0; i < nb_pkts; i++)
+ rte_pktmbuf_free(bufs[i]);
+
+ tx_queue->tx_stat.pkts += nb_pkts;
+
+ return i;
+}
+
/*
* Callback to handle sending packets through a real NIC.
*/
@@ -447,6 +509,24 @@ open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
return 0;
}
+static uint64_t
+count_packets_in_pcaps(pcap_t **pcap, struct pcap_rx_queue *pcap_q)
+{
+ const u_char *packet;
+ struct pcap_pkthdr header;
+ uint64_t pcap_pkt_count;
+
+ while ((packet = pcap_next(*pcap, &header)))
+ pcap_pkt_count++;
+
+ /* the pcap is reopened so it can be used as normal later */
+ pcap_close(*pcap);
+ *pcap = NULL;
+ open_single_rx_pcap(pcap_q->name, pcap);
+
+ return pcap_pkt_count;
+}
+
static int
eth_dev_start(struct rte_eth_dev *dev)
{
@@ -663,6 +743,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mb_pool)
{
+ unsigned int i;
struct pmd_internals *internals = dev->data->dev_private;
struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
@@ -671,6 +752,49 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
pcap_q->queue_id = rx_queue_id;
dev->data->rx_queues[rx_queue_id] = pcap_q;
+ if (internals->infinite_rx) {
+ struct pmd_process_private *pp;
+ char ring_name[NAME_MAX];
+ uint16_t num_rx = 0;
+ static uint32_t ring_number;
+ uint64_t pcap_pkt_count = 0;
+ pcap_t **pcap;
+
+ pp = rte_eth_devices[pcap_q->port_id].process_private;
+ pcap = &pp->rx_pcap[pcap_q->queue_id];
+
+ if (unlikely(*pcap == NULL))
+ rte_panic("no packets in pcap to fill ring with\n");
+
+ pcap_pkt_count = count_packets_in_pcaps(pcap, pcap_q);
+
+ snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu16,
+ ring_number);
+
+ pcap_q->pkts = rte_ring_create(ring_name,
+ rte_align64pow2(pcap_pkt_count + 1), 0,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ ring_number++;
+ if (!pcap_q->pkts)
+ rte_panic("failed to alloc ring\n");
+
+
+ struct rte_mbuf *bufs[pcap_pkt_count];
+ if (rte_pktmbuf_alloc_bulk(mb_pool, bufs, pcap_pkt_count) != 0)
+ return 0;
+
+ num_rx = eth_pcap_rx(pcap_q, bufs, pcap_pkt_count);
+
+ /* check for multiseg mbufs */
+ for (i = 0; i < num_rx; i++) {
+ if (bufs[i]->nb_segs != 1)
+ rte_panic("jumbo frames are not supported in infinite rx mode\n");
+ }
+
+ rte_ring_enqueue_bulk(pcap_q->pkts, (void * const *)bufs,
+ num_rx, NULL);
+ }
+
return 0;
}
@@ -908,6 +1032,34 @@ select_phy_mac(const char *key __rte_unused, const char *value,
return 0;
}
+static int
+get_infinite_rx_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (extra_args) {
+ const int infinite_rx = atoi(value);
+ int *enable_infinite_rx = extra_args;
+
+ if (infinite_rx)
+ *enable_infinite_rx = 1;
+ }
+ return 0;
+}
+
+static int
+get_tx_drop_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (extra_args) {
+ const int tx_drop = atoi(value);
+ int *enable_tx_drop = extra_args;
+
+ if (tx_drop)
+ *enable_tx_drop = 1;
+ }
+ return 0;
+}
+
static struct rte_vdev_driver pmd_pcap_drv;
static int
@@ -1105,7 +1257,8 @@ static int
eth_from_pcaps(struct rte_vdev_device *vdev,
struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
- int single_iface, unsigned int using_dumpers)
+ int single_iface, unsigned int using_dumpers,
+ unsigned int infinite_rx)
{
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
@@ -1132,9 +1285,18 @@ eth_from_pcaps(struct rte_vdev_device *vdev,
}
}
- eth_dev->rx_pkt_burst = eth_pcap_rx;
+ /* assign rx ops */
+ if (infinite_rx) {
+ eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
+ internals->infinite_rx = infinite_rx;
+ } else {
+ eth_dev->rx_pkt_burst = eth_pcap_rx;
+ }
- if (using_dumpers)
+ /* assign tx ops */
+ if (infinite_rx)
+ eth_dev->tx_pkt_burst = eth_tx_drop;
+ else if (using_dumpers)
eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
else
eth_dev->tx_pkt_burst = eth_pcap_tx;
@@ -1148,6 +1310,7 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
{
const char *name;
unsigned int is_rx_pcap = 0, is_tx_pcap = 0;
+ unsigned int infinite_rx = 0, tx_drop = 0;
struct rte_kvargs *kvlist;
struct pmd_devargs pcaps = {0};
struct pmd_devargs dumpers = {0};
@@ -1217,6 +1380,17 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
pcaps.num_of_queue = 0;
if (is_rx_pcap) {
+ /*
+ * We check whether we want to infinitely rx the pcap file
+ */
+ if (rte_kvargs_count(kvlist, ETH_PCAP_INFINITE_RX_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_PCAP_INFINITE_RX_ARG,
+ &get_infinite_rx_arg, &infinite_rx);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
&open_rx_pcap, &pcaps);
} else {
@@ -1228,18 +1402,30 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
goto free_kvlist;
/*
- * We check whether we want to open a TX stream to a real NIC or a
- * pcap file
+ * We check whether we want to open a TX stream to a real NIC,
+ * a pcap file, or drop packets on tx
*/
is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
dumpers.num_of_queue = 0;
- if (is_tx_pcap)
+ tx_drop = rte_kvargs_count(kvlist, ETH_PCAP_TX_DROP_ARG) ? 1 : 0;
+ if (rte_kvargs_count(kvlist, ETH_PCAP_TX_DROP_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_DROP_ARG,
+ &get_tx_drop_arg, &tx_drop);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
+ if (tx_drop) {
+ /* add dummy queue which counts dropped packets */
+ ret = add_queue(&dumpers, "dummy", "tx_drop", NULL, NULL);
+ } else if (is_tx_pcap) {
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
&open_tx_pcap, &dumpers);
- else
+ } else {
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
&open_tx_iface, &dumpers);
+ }
if (ret < 0)
goto free_kvlist;
@@ -1285,8 +1471,11 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
goto free_kvlist;
}
+ PMD_LOG(INFO, "Configure pmd_pcap: infinite_rx is %s",
+ infinite_rx ? "enabled" : "disabled");
+
ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
- dumpers.num_of_queue, single_iface, is_tx_pcap);
+ dumpers.num_of_queue, single_iface, is_tx_pcap, infinite_rx);
free_kvlist:
rte_kvargs_free(kvlist);
@@ -1338,7 +1527,9 @@ RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
ETH_PCAP_TX_IFACE_ARG "=<ifc> "
ETH_PCAP_IFACE_ARG "=<ifc> "
- ETH_PCAP_PHY_MAC_ARG "=<int>");
+ ETH_PCAP_PHY_MAC_ARG "=<int>"
+ ETH_PCAP_INFINITE_RX_ARG "=<int>"
+ ETH_PCAP_TX_DROP_ARG "=<int>");
RTE_INIT(eth_pcap_init_log)
{
--
2.17.1
* Re: [dpdk-dev] [PATCH 19.08] net/pcap: enable infinitely rxing a pcap file
From: Ferruh Yigit @ 2019-05-27 15:15 UTC (permalink / raw)
To: Cian Ferriter, Bruce Richardson, John McNamara, Marko Kovacevic; +Cc: dev
On 4/11/2019 12:00 PM, Cian Ferriter wrote:
> It can be useful to use pcap files for some rudimentary performance
> testing. This patch enables this functionality in the pcap driver.
>
> At a high level, this works by creating a ring of sufficient size to
> store the packets in the pcap file passed to the application. When the
> rx function for this mode is called, packets are dequeued from the ring
> for use by the application and also enqueued back on to the ring to be
> "received" again.
>
> A tx_drop mode is also added since transmitting to a tx_pcap file isn't
> desirable at a high traffic rate.
>
> Jumbo frames are not supported in this mode. When filling the ring at rx
> queue setup time, the presence of multi-segment mbufs is checked for.
> The PMD will exit on detection of these multi-segment mbufs.
>
> Signed-off-by: Cian Ferriter <cian.ferriter@intel.com>
> ---
> doc/guides/nics/pcap_ring.rst | 15 +++
> drivers/net/pcap/rte_eth_pcap.c | 209 ++++++++++++++++++++++++++++++--
> 2 files changed, 215 insertions(+), 9 deletions(-)
>
> diff --git a/doc/guides/nics/pcap_ring.rst b/doc/guides/nics/pcap_ring.rst
> index c1ef9196b..45f55a345 100644
> --- a/doc/guides/nics/pcap_ring.rst
> +++ b/doc/guides/nics/pcap_ring.rst
> @@ -106,6 +106,21 @@ Runtime Config Options
>
> --vdev 'net_pcap0,iface=eth0,phy_mac=1'
>
> +- Use the RX PCAP file to infinitely receive packets
> +
> + In case the ``rx_pcap=`` configuration is set, the user may want to use the selected PCAP file for rudimentary
> + performance testing. This can be done with the ``devarg`` ``infinite_rx``, for example::
> +
> + --vdev 'net_pcap0,rx_pcap=file_rx.pcap,infinite_rx=1,tx_drop=1'
> +
> + When this mode is used, it is recommended to use the ``tx_drop`` ``devarg``.
> +
> +- Drop all packets on transmit
> +
> + The user may want to drop all packets on tx for a device. This can be done with the ``tx_drop`` ``devarg``, for example::
> +
> + --vdev 'net_pcap0,rx_pcap=file_rx.pcap,tx_drop=1'
Is there a performance drop when tx is directed to the 'loopback' interface,
like: --vdev 'net_pcap0,rx_pcap=file_rx.pcap,tx_iface=lo' ?
If there is no performance drop, I am for using the 'lo' interface instead of adding new devargs.
> +
> Examples of Usage
> ^^^^^^^^^^^^^^^^^
>
> diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
> index 353538f16..b72db973a 100644
> --- a/drivers/net/pcap/rte_eth_pcap.c
> +++ b/drivers/net/pcap/rte_eth_pcap.c
<...>
> @@ -178,6 +186,40 @@ eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
> }
> }
>
> +static uint16_t
> +eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
> +{
> + int i;
> + struct pcap_rx_queue *pcap_q = queue;
> +
> + if (unlikely(nb_pkts == 0))
> + return 0;
> +
> + if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0)
> + return 0;
> +
> + for (i = 0; i < nb_pkts; i++) {
> + struct rte_mbuf *pcap_buf;
> + int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);
> + if (err)
> + rte_panic("failed to dequeue from pcap pkts ring\n");
Please don't have any rte_panic() in the driver code; there are a few more in the patch.
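Something along these lines (an untested sketch; the extra loop index 'j' is
hypothetical, everything else is named as in the patch) would keep the failure
local to the burst instead of taking the whole application down:

	/* on dequeue failure, log, free the untouched mbufs and end the burst */
	int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);
	if (unlikely(err)) {
		PMD_LOG(ERR, "Failed to dequeue from pcap pkts ring");
		for (j = i; j < nb_pkts; j++)
			rte_pktmbuf_free(bufs[j]);
		break;
	}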
> +
> + rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *),
> + rte_pktmbuf_mtod(pcap_buf, void *),
> + pcap_buf->data_len);
> + bufs[i]->data_len = pcap_buf->data_len;
> + bufs[i]->pkt_len = pcap_buf->pkt_len;
> + bufs[i]->port = pcap_q->port_id;
> +
> + /* enqueue packet back on ring to allow infinite rx */
> + rte_ring_enqueue(pcap_q->pkts, pcap_buf);
> + }
> +
> + pcap_q->rx_stat.pkts += i;
For consistency, can you please update "pcap_q->rx_stat.bytes" too?
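For example (a fragment only; the 'rx_bytes' accumulator is made up here for
illustration):

	uint32_t rx_bytes = 0;
	...
	bufs[i]->port = pcap_q->port_id;
	rx_bytes += pcap_buf->data_len;
	...
	pcap_q->rx_stat.pkts += i;
	pcap_q->rx_stat.bytes += rx_bytes;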
> +
> + return i;
> +}
> +
> static uint16_t
> eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
> {
<...>
> @@ -447,6 +509,24 @@ open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
> return 0;
> }
>
> +static uint64_t
> +count_packets_in_pcaps(pcap_t **pcap, struct pcap_rx_queue *pcap_q)
> +{
> + const u_char *packet;
> + struct pcap_pkthdr header;
> + uint64_t pcap_pkt_count;
Compiler is complaining about uninitialized 'pcap_pkt_count'.
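i.e. it needs an explicit zero initialization before the counting loop:

	uint64_t pcap_pkt_count = 0;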
> +
> + while ((packet = pcap_next(*pcap, &header)))
> + pcap_pkt_count++;
It seems there is no quicker way to get the number of packets from a pcap file; if
anyone knows a way, comments are welcome.
> +
> + /* the pcap is reopened so it can be used as normal later */
> + pcap_close(*pcap);
> + *pcap = NULL;
> + open_single_rx_pcap(pcap_q->name, pcap);
> +
> + return pcap_pkt_count;
> +}
> +
> static int
> eth_dev_start(struct rte_eth_dev *dev)
> {
<...>
> @@ -671,6 +752,49 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
> pcap_q->queue_id = rx_queue_id;
> dev->data->rx_queues[rx_queue_id] = pcap_q;
>
> + if (internals->infinite_rx) {
> + struct pmd_process_private *pp;
> + char ring_name[NAME_MAX];
> + uint16_t num_rx = 0;
> + static uint32_t ring_number;
> + uint64_t pcap_pkt_count = 0;
> + pcap_t **pcap;
> +
> + pp = rte_eth_devices[pcap_q->port_id].process_private;
> + pcap = &pp->rx_pcap[pcap_q->queue_id];
> +
> + if (unlikely(*pcap == NULL))
> + rte_panic("no packets in pcap to fill ring with\n");
> +
> + pcap_pkt_count = count_packets_in_pcaps(pcap, pcap_q);
> +
> + snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu16,
> + ring_number);
> +
> + pcap_q->pkts = rte_ring_create(ring_name,
> + rte_align64pow2(pcap_pkt_count + 1), 0,
> + RING_F_SP_ENQ | RING_F_SC_DEQ);
> + ring_number++;
> + if (!pcap_q->pkts)
> + rte_panic("failed to alloc ring\n");
> +
> +
> + struct rte_mbuf *bufs[pcap_pkt_count];
Can you please move the declaration to the beginning of the function.
> + if (rte_pktmbuf_alloc_bulk(mb_pool, bufs, pcap_pkt_count) != 0)
> + return 0;
Should this return an error?
> +
> + num_rx = eth_pcap_rx(pcap_q, bufs, pcap_pkt_count);
> +
> + /* check for multiseg mbufs */
> + for (i = 0; i < num_rx; i++) {
> + if (bufs[i]->nb_segs != 1)
> + rte_panic("jumbo frames are not supported in infinite rx mode\n");
This needs to be replaced with returning an error, and please remember to
clean up in that case.
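An untested sketch of the intended shape (the 'error' label and the errno value
are illustrative, not from the patch):

	/* check for multiseg mbufs */
	for (i = 0; i < num_rx; i++) {
		if (bufs[i]->nb_segs != 1) {
			PMD_LOG(ERR, "jumbo frames are not supported in infinite rx mode");
			goto error;
		}
	}
	...
error:
	/* release everything taken from the mempool, and the ring itself */
	for (i = 0; i < num_rx; i++)
		rte_pktmbuf_free(bufs[i]);
	rte_ring_free(pcap_q->pkts);
	return -EINVAL;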
> + }
> +
> + rte_ring_enqueue_bulk(pcap_q->pkts, (void * const *)bufs,
> + num_rx, NULL);
> + }
> +
> return 0;
> }
>
<...>
> @@ -1132,9 +1285,18 @@ eth_from_pcaps(struct rte_vdev_device *vdev,
> }
> }
>
> - eth_dev->rx_pkt_burst = eth_pcap_rx;
> + /* assign rx ops */
> + if (infinite_rx) {
> + eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
> + internals->infinite_rx = infinite_rx;
Since "infinite_rx" can be 0 or 1, isn't it better to set
"internals->infinite_rx" out of this check, although functionally it will be
same (since internals is all zero by default), logically better I think
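i.e. something like:

	internals->infinite_rx = infinite_rx;
	if (infinite_rx)
		eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
	else
		eth_dev->rx_pkt_burst = eth_pcap_rx;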
> + } else {
> + eth_dev->rx_pkt_burst = eth_pcap_rx;
> + }
>
> - if (using_dumpers)
> + /* assign tx ops */
> + if (infinite_rx)
> + eth_dev->tx_pkt_burst = eth_tx_drop;
'tx_pkt_burst' shouldn't be set based on the 'infinite_rx' check; I guess the
intention was the 'tx_drop' check.
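i.e. the selection would look like this sketch, assuming 'tx_drop' is passed
down to eth_from_pcaps() (it is not a parameter in this version of the patch):

	if (tx_drop)
		eth_dev->tx_pkt_burst = eth_tx_drop;
	else if (using_dumpers)
		eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
	else
		eth_dev->tx_pkt_burst = eth_pcap_tx;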
> + else if (using_dumpers)
> eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
> else
> eth_dev->tx_pkt_burst = eth_pcap_tx;
<...>
> @@ -1217,6 +1380,17 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
> pcaps.num_of_queue = 0;
>
> if (is_rx_pcap) {
> + /*
> + * We check whether we want to infinitely rx the pcap file
> + */
> + if (rte_kvargs_count(kvlist, ETH_PCAP_INFINITE_RX_ARG) == 1) {
What do you think about printing a warning if the user provides the value more than once?
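For example (sketch):

	if (rte_kvargs_count(kvlist, ETH_PCAP_INFINITE_RX_ARG) > 1)
		PMD_LOG(WARNING,
			"infinite_rx devarg given more than once, ignoring it");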
> + ret = rte_kvargs_process(kvlist,
> + ETH_PCAP_INFINITE_RX_ARG,
> + &get_infinite_rx_arg, &infinite_rx);
> + if (ret < 0)
> + goto free_kvlist;
> + }
> +
> ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
> &open_rx_pcap, &pcaps);
> } else {
> @@ -1228,18 +1402,30 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
> goto free_kvlist;
>
> /*
> - * We check whether we want to open a TX stream to a real NIC or a
> - * pcap file
> + * We check whether we want to open a TX stream to a real NIC,
> + * a pcap file, or drop packets on tx
> */
> is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
> dumpers.num_of_queue = 0;
>
> - if (is_tx_pcap)
> + tx_drop = rte_kvargs_count(kvlist, ETH_PCAP_TX_DROP_ARG) ? 1 : 0;
This line can be dropped; the code below already gets the 'tx_drop' value, overwriting this one.
> + if (rte_kvargs_count(kvlist, ETH_PCAP_TX_DROP_ARG) == 1) {
> + ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_DROP_ARG,
> + &get_tx_drop_arg, &tx_drop);
> + if (ret < 0)
> + goto free_kvlist;
> + }
> +
> + if (tx_drop) {
> + /* add dummy queue which counts dropped packets */
> + ret = add_queue(&dumpers, "dummy", "tx_drop", NULL, NULL);
This will break the multiple queue use case. Instead of adding a single queue,
we need to find a way to add as many queues as the user wants, all of which will
just ignore the packets.
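For instance, one drop queue per requested queue, something like this sketch
(taking the queue count from 'pcaps.num_of_queue' is my assumption):

	unsigned int q;

	for (q = 0; q < pcaps.num_of_queue; q++) {
		ret = add_queue(&dumpers, "dummy", "tx_drop", NULL, NULL);
		if (ret < 0)
			goto free_kvlist;
	}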
> + } else if (is_tx_pcap) {
> ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
> &open_tx_pcap, &dumpers);
> - else
> + } else {
> ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
> &open_tx_iface, &dumpers);
> + }
>
> if (ret < 0)
> goto free_kvlist;
<...>
* Re: [dpdk-dev] [PATCH 19.08] net/pcap: enable infinitely rxing a pcap file
From: Ferriter, Cian @ 2019-06-05 14:34 UTC (permalink / raw)
To: Yigit, Ferruh, Richardson, Bruce, Mcnamara, John, Kovacevic, Marko; +Cc: dev
Hi Ferruh,
Thanks for the review!
I've posted the v2 of this patch.
My replies are inline
Thanks,
Cian
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: 27 May 2019 16:15
> To: Ferriter, Cian <cian.ferriter@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Mcnamara, John
> <john.mcnamara@intel.com>; Kovacevic, Marko
> <marko.kovacevic@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH 19.08] net/pcap: enable infinitely rxing a pcap file
>
> On 4/11/2019 12:00 PM, Cian Ferriter wrote:
> > It can be useful to use pcap files for some rudimentary performance
> > testing. This patch enables this functionality in the pcap driver.
> >
> > At a high level, this works by creating a ring of sufficient size to
> > store the packets in the pcap file passed to the application. When the
> > rx function for this mode is called, packets are dequeued from the
> > ring for use by the application and also enqueued back on to the ring
> > to be "received" again.
> >
> > A tx_drop mode is also added since transmitting to a tx_pcap file
> > isn't desirable at a high traffic rate.
> >
> > Jumbo frames are not supported in this mode. When filling the ring at
> > rx queue setup time, the presence of multi-segment mbufs is checked for.
> > The PMD will exit on detection of these multi-segment mbufs.
> >
> > Signed-off-by: Cian Ferriter <cian.ferriter@intel.com>
> > ---
> > doc/guides/nics/pcap_ring.rst | 15 +++
> > drivers/net/pcap/rte_eth_pcap.c | 209 ++++++++++++++++++++++++++++++--
> > 2 files changed, 215 insertions(+), 9 deletions(-)
> >
> > diff --git a/doc/guides/nics/pcap_ring.rst b/doc/guides/nics/pcap_ring.rst
> > index c1ef9196b..45f55a345 100644
> > --- a/doc/guides/nics/pcap_ring.rst
> > +++ b/doc/guides/nics/pcap_ring.rst
> > @@ -106,6 +106,21 @@ Runtime Config Options
> >
> > --vdev 'net_pcap0,iface=eth0,phy_mac=1'
> >
> > +- Use the RX PCAP file to infinitely receive packets
> > +
> > + In case the ``rx_pcap=`` configuration is set, the user may want to use the
> > + selected PCAP file for rudimentary performance testing. This can be done
> > + with the ``devarg`` ``infinite_rx``, for example::
> > +
> > + --vdev 'net_pcap0,rx_pcap=file_rx.pcap,infinite_rx=1,tx_drop=1'
> > +
> > + When this mode is used, it is recommended to use the ``tx_drop`` ``devarg``.
> > +
> > +- Drop all packets on transmit
> > +
> > + The user may want to drop all packets on tx for a device. This can be done
> > + with the ``tx_drop`` ``devarg``, for example::
> > +
> > + --vdev 'net_pcap0,rx_pcap=file_rx.pcap,tx_drop=1'
>
> Is there a performance drop when tx is directed to the 'loopback' interface,
> like: --vdev 'net_pcap0,rx_pcap=file_rx.pcap,tx_iface=lo' ?
> If there is no performance drop, I am for using the 'lo' interface instead of
> adding new devargs.
>
While I did think of using /dev/null as a method of dropping packets, I didn't think to use the loopback interface. The reason I implemented the tx_drop feature after trying 'tx_pcap=/dev/null' was that the file writes proved too expensive.
I've tried out the loopback interface as per your recommendation and it suffers from the same problem as using /dev/null. Below is a comparison of the three options for dropping packets.
= tx_drop =
CMD: testpmd -l 1,2 --file-prefix=basic -w 0:00.0 --vdev net_pcap0,rx_pcap=/opt/cian/ovs/scripts/pcaps/udp.pcap,infinite_rx=1,tx_drop=1 --vdev net_pcap1,rx_pcap=/opt/cian/ovs/scripts/pcaps/udp.pcap,infinite_rx=1,tx_drop=1 -- --no-flush-rx --rxq=1 --txq=1 --i
For each port:
Tx-pps: 22680856
= lo =
CMD: testpmd -l 1,2 --file-prefix=basic -w 0:00.0 --vdev net_pcap0,rx_pcap=/opt/cian/ovs/scripts/pcaps/udp.pcap,tx_iface=lo,infinite_rx=1 --vdev net_pcap1,rx_pcap=/opt/cian/ovs/scripts/pcaps/udp.pcap,tx_iface=lo,infinite_rx=1 -- --no-flush-rx --rxq=1 --txq=1 --i
For each port:
Tx-pps: 255507
= /dev/null =
CMD: testpmd -l 1,2 --file-prefix=basic -w 0:00.0 --vdev net_pcap0,rx_pcap=/opt/cian/ovs/scripts/pcaps/udp.pcap,tx_pcap=/dev/null,infinite_rx=1 --vdev net_pcap1,rx_pcap=/opt/cian/ovs/scripts/pcaps/udp.pcap,tx_pcap=/dev/null,infinite_rx=1 -- --no-flush-rx --rxq=1 --txq=1 --i
For each port:
Tx-pps: 3106131
For the case where the loopback or local interface is being used to drop packets, perf top shows that the majority of time is being spent in kernel functions such as tpacket_rcv, do_syscall_64 and kfree_skb.
For the case where the /dev/null is being used to drop packets, perf top shows that the majority of time is being spent in libc file operation functions such as _IO_fwrite and _IO_file_xsputn.
Tx_drop is 7.3 times faster than /dev/null and ~89 times faster than lo, so it's the preferred method for dropping packets.
> > +
> > Examples of Usage
> > ^^^^^^^^^^^^^^^^^
> >
> > diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
> > index 353538f16..b72db973a 100644
> > --- a/drivers/net/pcap/rte_eth_pcap.c
> > +++ b/drivers/net/pcap/rte_eth_pcap.c
> <...>
>
> > @@ -178,6 +186,40 @@ eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
> > }
> > }
> >
> > +static uint16_t
> > +eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
> > +{
> > + int i;
> > + struct pcap_rx_queue *pcap_q = queue;
> > +
> > + if (unlikely(nb_pkts == 0))
> > + return 0;
> > +
> > + if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0)
> > + return 0;
> > +
> > + for (i = 0; i < nb_pkts; i++) {
> > + struct rte_mbuf *pcap_buf;
> > + int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);
> > + if (err)
> > + rte_panic("failed to dequeue from pcap pkts
> ring\n");
>
> Please don't have any rte_panic() in the driver code; there are a few more in
> the patch.
>
Fixed in the next version.
> > +
> > + rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *),
> > + rte_pktmbuf_mtod(pcap_buf, void *),
> > + pcap_buf->data_len);
> > + bufs[i]->data_len = pcap_buf->data_len;
> > + bufs[i]->pkt_len = pcap_buf->pkt_len;
> > + bufs[i]->port = pcap_q->port_id;
> > +
> > + /* enqueue packet back on ring to allow infinite rx */
> > + rte_ring_enqueue(pcap_q->pkts, pcap_buf);
> > + }
> > +
> > + pcap_q->rx_stat.pkts += i;
>
> For consistency, can you please update "pcap_q->rx_stat.bytes" too?
>
Fixed in the next version.
> > +
> > + return i;
> > +}
> > +
> > static uint16_t
> > eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) {
>
> <...>
>
> > @@ -447,6 +509,24 @@ open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
> > return 0;
> > }
> >
> > +static uint64_t
> > +count_packets_in_pcaps(pcap_t **pcap, struct pcap_rx_queue *pcap_q)
> > +{
> > + const u_char *packet;
> > + struct pcap_pkthdr header;
> > + uint64_t pcap_pkt_count;
>
> Compiler is complaining about uninitialized 'pcap_pkt_count'.
>
This is valid and I see the same warning. Not sure why I didn't see this before; I don't remember specifically ignoring this warning...
Fixed in the next version.
> > +
> > + while ((packet = pcap_next(*pcap, &header)))
> > + pcap_pkt_count++;
>
> It seems there is no quicker way to get the number of packets from a pcap
> file; if anyone knows a way, comments are welcome.
>
> > +
> > + /* the pcap is reopened so it can be used as normal later */
> > + pcap_close(*pcap);
> > + *pcap = NULL;
> > + open_single_rx_pcap(pcap_q->name, pcap);
> > +
> > + return pcap_pkt_count;
> > +}
> > +
> > static int
> > eth_dev_start(struct rte_eth_dev *dev) {
>
> <...>
>
> > @@ -671,6 +752,49 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
> > pcap_q->queue_id = rx_queue_id;
> > dev->data->rx_queues[rx_queue_id] = pcap_q;
> >
> > + if (internals->infinite_rx) {
> > + struct pmd_process_private *pp;
> > + char ring_name[NAME_MAX];
> > + uint16_t num_rx = 0;
> > + static uint32_t ring_number;
> > + uint64_t pcap_pkt_count = 0;
> > + pcap_t **pcap;
> > +
> > + pp = rte_eth_devices[pcap_q->port_id].process_private;
> > + pcap = &pp->rx_pcap[pcap_q->queue_id];
> > +
> > + if (unlikely(*pcap == NULL))
> > + rte_panic("no packets in pcap to fill ring with\n");
> > +
> > + pcap_pkt_count = count_packets_in_pcaps(pcap, pcap_q);
> > +
> > + snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu16,
> > + ring_number);
> > +
> > + pcap_q->pkts = rte_ring_create(ring_name,
> > + rte_align64pow2(pcap_pkt_count + 1), 0,
> > + RING_F_SP_ENQ | RING_F_SC_DEQ);
> > + ring_number++;
> > + if (!pcap_q->pkts)
> > + rte_panic("failed to alloc ring\n");
> > +
> > +
> > + struct rte_mbuf *bufs[pcap_pkt_count];
>
> Can you please move the declaration to the beginning of the function.
>
I have reworked this section so that we don't need to declare a variable-length array of mbufs based on the number of packets in the pcap file passed.
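Roughly, the rework feeds the ring one packet at a time through a fixed-size
array instead of sizing an array by the packet count (an illustrative sketch,
not the exact v2 code; the 'error' path is elided):

	struct rte_mbuf *bufs[1];

	while (eth_pcap_rx(pcap_q, bufs, 1)) {
		/* reject multi-segment mbufs instead of panicking */
		if (bufs[0]->nb_segs != 1)
			goto error;

		rte_ring_enqueue_bulk(pcap_q->pkts, (void * const *)bufs, 1, NULL);
	}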
> > + if (rte_pktmbuf_alloc_bulk(mb_pool, bufs, pcap_pkt_count) != 0)
> > + return 0;
>
> Should this return an error?
>
Ooops, this should return an error, not success!!
Fixed in the next version.
> > +
> > + num_rx = eth_pcap_rx(pcap_q, bufs, pcap_pkt_count);
> > +
> > + /* check for multiseg mbufs */
> > + for (i = 0; i < num_rx; i++) {
> > + if (bufs[i]->nb_segs != 1)
> > + rte_panic("jumbo frames are not supported
> in infinite rx
> > +mode\n");
>
> This needs to be replaced with returning an error, and please remember to
> clean up in that case.
>
Fixed in the next version.
> > + }
> > +
> > + rte_ring_enqueue_bulk(pcap_q->pkts, (void * const *)bufs,
> > + num_rx, NULL);
> > + }
> > +
> > return 0;
> > }
> >
> <...>
>
> > @@ -1132,9 +1285,18 @@ eth_from_pcaps(struct rte_vdev_device *vdev,
> > }
> > }
> >
> > - eth_dev->rx_pkt_burst = eth_pcap_rx;
> > + /* assign rx ops */
> > + if (infinite_rx) {
> > + eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
> > + internals->infinite_rx = infinite_rx;
>
> Since "infinite_rx" can be 0 or 1, isn't it better to set "internals->infinite_rx"
> out of this check, although functionally it will be same (since internals is all
> zero by default), logically better I think
>
Agreed, I've done this in the next version.
> > + } else {
> > + eth_dev->rx_pkt_burst = eth_pcap_rx;
> > + }
> >
> > - if (using_dumpers)
> > + /* assign tx ops */
> > + if (infinite_rx)
> > + eth_dev->tx_pkt_burst = eth_tx_drop;
>
> 'tx_pkt_burst' shouldn't be set based on the 'infinite_rx' check; I guess the
> intention was the 'tx_drop' check.
>
Correct, this should check tx_drop! Fixed in the next version.
> > + else if (using_dumpers)
> > eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
> > else
> > eth_dev->tx_pkt_burst = eth_pcap_tx;
>
> <...>
>
> > @@ -1217,6 +1380,17 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
> > pcaps.num_of_queue = 0;
> >
> > if (is_rx_pcap) {
> > + /*
> > + * We check whether we want to infinitely rx the pcap file
> > + */
> > + if (rte_kvargs_count(kvlist, ETH_PCAP_INFINITE_RX_ARG) == 1) {
>
> What do you think about printing a warning if the user provides the value more
> than once?
>
Sounds like a good idea, I've added this in the next version.
> > + ret = rte_kvargs_process(kvlist,
> > + ETH_PCAP_INFINITE_RX_ARG,
> > + &get_infinite_rx_arg, &infinite_rx);
> > + if (ret < 0)
> > + goto free_kvlist;
> > + }
> > +
> > ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
> > &open_rx_pcap, &pcaps);
> > } else {
> > @@ -1228,18 +1402,30 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
> > goto free_kvlist;
> >
> > /*
> > - * We check whether we want to open a TX stream to a real NIC or a
> > - * pcap file
> > + * We check whether we want to open a TX stream to a real NIC,
> > + * a pcap file, or drop packets on tx
> > */
> > is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
> > dumpers.num_of_queue = 0;
> >
> > - if (is_tx_pcap)
> > - tx_drop = rte_kvargs_count(kvlist, ETH_PCAP_TX_DROP_ARG) ? 1 : 0;
>
> This line can be dropped; the code below already gets the 'tx_drop' value,
> overwriting this one.
>
> > + if (rte_kvargs_count(kvlist, ETH_PCAP_TX_DROP_ARG) == 1) {
> > + ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_DROP_ARG,
> > + &get_tx_drop_arg, &tx_drop);
> > + if (ret < 0)
> > + goto free_kvlist;
> > + }
> > +
> > + if (tx_drop) {
> > + /* add dummy queue which counts dropped packets */
> > + ret = add_queue(&dumpers, "dummy", "tx_drop", NULL, NULL);
>
> This will break the multiple queue use case. Instead of adding a single queue,
> we need to find a way to add as many queues as the user wants, all of which will
> just ignore the packets.
>
I've fixed this in the latest version so that one drop queue is added per rxq on the device.
> > + } else if (is_tx_pcap) {
> > ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
> > &open_tx_pcap, &dumpers);
> > - else
> > + } else {
> > ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
> > &open_tx_iface, &dumpers);
> > + }
> >
> > if (ret < 0)
> > goto free_kvlist;
>
> <...>