* [dpdk-dev] [PATCH 3/4] examples: remove useless debug flags
2016-04-22 13:43 [dpdk-dev] [PATCH 0/4] cleanup debug and dead code Thomas Monjalon
2016-04-22 13:43 ` [dpdk-dev] [PATCH 1/4] eal: increase log level of some messages Thomas Monjalon
2016-04-22 13:43 ` [dpdk-dev] [PATCH 2/4] log: increase default level to info Thomas Monjalon
@ 2016-04-22 13:43 ` Thomas Monjalon
2016-04-28 20:15 ` Yuanhan Liu
2016-04-22 13:44 ` [dpdk-dev] [PATCH 4/4] eal: add assert macro for debug Thomas Monjalon
2016-05-02 13:37 ` [dpdk-dev] [PATCH 0/4] cleanup debug and dead code Thomas Monjalon
4 siblings, 1 reply; 12+ messages in thread
From: Thomas Monjalon @ 2016-04-22 13:43 UTC (permalink / raw)
To: dev
The debug logs must be enabled at both compile time and run time.
Some examples also define internal flags to enable their own debug
logs. These logs are now enabled in the debug configs and
can be disabled at run time thanks to the more generic logtype mechanism:
rte_set_log_type(RTE_LOGTYPE_USER1, 0);
Removing these #ifdef blocks makes these code branches easier to test
and avoids dead code pitfalls.
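For example, an application built with the debug config can still tune
these logs at run time with the existing EAL log API (a minimal sketch;
which calls an application actually makes is up to it):

	/* silence the USER1 log type used by most examples */
	rte_set_log_type(RTE_LOGTYPE_USER1, 0);
	/* or filter out DEBUG messages globally */
	rte_set_log_level(RTE_LOG_INFO);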
Signed-off-by: Thomas Monjalon <thomas.monjalon@6wind.com>
---
examples/distributor/main.c | 19 ++----
examples/ipsec-secgw/esp.c | 14 ++--
examples/ipsec-secgw/ipsec.c | 6 +-
examples/ipsec-secgw/ipsec.h | 3 -
examples/l3fwd-acl/main.c | 3 +
examples/packet_ordering/main.c | 18 ++---
examples/tep_termination/main.c | 7 +-
examples/tep_termination/main.h | 8 ---
examples/vhost/main.c | 143 +++++++++++++++++++++-------------------
examples/vhost/main.h | 12 ----
examples/vhost_xen/main.c | 62 +++++++----------
examples/vhost_xen/main.h | 11 ----
12 files changed, 120 insertions(+), 186 deletions(-)
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index c0201a9..0266d95 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -52,19 +52,6 @@
#define BURST_SIZE 32
#define RTE_RING_SZ 1024
-/* uncommnet below line to enable debug logs */
-/* #define DEBUG */
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) do { \
- RTE_LOG(DEBUG, log_type, fmt, ##args); \
-} while (0)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#endif
-
#define RTE_LOGTYPE_DISTRAPP RTE_LOGTYPE_USER1
/* mask of enabled ports */
@@ -240,7 +227,8 @@ lcore_rx(struct lcore_params *p)
uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
app_stats.rx.enqueued_pkts += sent;
if (unlikely(sent < nb_ret)) {
- LOG_DEBUG(DISTRAPP, "%s:Packet loss due to full ring\n", __func__);
+ RTE_LOG(DEBUG, DISTRAPP,
+ "%s:Packet loss due to full ring\n", __func__);
while (sent < nb_ret)
rte_pktmbuf_free(bufs[sent++]);
}
@@ -271,7 +259,8 @@ flush_one_port(struct output_buffer *outbuf, uint8_t outp)
app_stats.tx.tx_pkts += nb_tx;
if (unlikely(nb_tx < outbuf->count)) {
- LOG_DEBUG(DISTRAPP, "%s:Packet loss with tx_burst\n", __func__);
+ RTE_LOG(DEBUG, DISTRAPP,
+ "%s:Packet loss with tx_burst\n", __func__);
do {
rte_pktmbuf_free(outbuf->mbufs[nb_tx]);
} while (++nb_tx < outbuf->count);
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 1927380..4611631 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -83,7 +83,7 @@ esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
sa->digest_len;
if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
- IPSEC_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
+ RTE_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
payload_len, sa->block_size);
return -EINVAL;
}
@@ -129,7 +129,7 @@ esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
IPSEC_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- IPSEC_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
return -1;
}
@@ -140,13 +140,13 @@ esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
padding = pad_len - *pad_len;
for (i = 0; i < *pad_len; i++) {
if (padding[i] != i) {
- IPSEC_LOG(ERR, IPSEC_ESP, "invalid pad_len field\n");
+ RTE_LOG(ERR, IPSEC_ESP, "invalid pad_len field\n");
return -EINVAL;
}
}
if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
- IPSEC_LOG(ERR, IPSEC_ESP,
+ RTE_LOG(ERR, IPSEC_ESP,
"failed to remove pad_len + digest\n");
return -EINVAL;
}
@@ -180,7 +180,7 @@ esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
/* Check maximum packet size */
if (unlikely(IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len +
sa->digest_len > IP_MAXPACKET)) {
- IPSEC_LOG(DEBUG, IPSEC_ESP, "ipsec packet is too big\n");
+ RTE_LOG(DEBUG, IPSEC_ESP, "ipsec packet is too big\n");
return -EINVAL;
}
@@ -195,7 +195,7 @@ esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
esp->spi = sa->spi;
esp->seq = htonl(sa->seq++);
- IPSEC_LOG(DEBUG, IPSEC_ESP, "pktlen %u\n", rte_pktmbuf_pkt_len(m));
+ RTE_LOG(DEBUG, IPSEC_ESP, "pktlen %u\n", rte_pktmbuf_pkt_len(m));
/* Fill pad_len using default sequential scheme */
for (i = 0; i < pad_len - 2; i++)
@@ -243,7 +243,7 @@ esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m __rte_unused,
IPSEC_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- IPSEC_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
return -1;
}
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index baf30d4..0654c06 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -58,13 +58,13 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
(void **)&cdev_id_qp);
if (ret < 0) {
- IPSEC_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
+ RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
"auth_algo %u\n", key.lcore_id, key.cipher_algo,
key.auth_algo);
return -1;
}
- IPSEC_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
+ RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
"%u qp %u\n", sa->spi, ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp);
@@ -87,7 +87,7 @@ enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
cqp->buf, cqp->len);
if (ret < cqp->len) {
- IPSEC_LOG(DEBUG, IPSEC, "Cryptodev %u queue %u:"
+ RTE_LOG(DEBUG, IPSEC, "Cryptodev %u queue %u:"
" enqueued %u crypto ops out of %u\n",
cqp->id, cqp->qp,
ret, cqp->len);
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index a13fdef..50037e2 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -52,11 +52,8 @@
if (!(exp)) { \
rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
}
-
-#define IPSEC_LOG RTE_LOG
#else
#define IPSEC_ASSERT(exp) do {} while (0)
-#define IPSEC_LOG(...) do {} while (0)
#endif /* IPSEC_DEBUG */
#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 26d9f5e..c8347a2 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -72,6 +72,9 @@
#include <rte_string_fns.h>
#include <rte_acl.h>
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#define L3FWDACL_DEBUG
+#endif
#define DO_RFC_1812_CHECKS
#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 15bb900..3c88b86 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -55,17 +55,6 @@
#define RING_SIZE 16384
-/* uncomment below line to enable debug logs */
-/* #define DEBUG */
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#endif
-
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_REORDERAPP RTE_LOGTYPE_USER1
@@ -240,7 +229,7 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
/* free the mbufs which failed from transmit */
app_stats.tx.ro_tx_failed_pkts += count;
- LOG_DEBUG(REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
+ RTE_LOG(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
pktmbuf_free_bulk(unsent, count);
}
@@ -421,7 +410,7 @@ rx_thread(struct rte_ring *ring_out)
nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
pkts, MAX_PKTS_BURST);
if (nb_rx_pkts == 0) {
- LOG_DEBUG(REORDERAPP,
+ RTE_LOG(DEBUG, REORDERAPP,
"%s():Received zero packets\n", __func__);
continue;
}
@@ -533,7 +522,8 @@ send_thread(struct send_thread_args *args)
if (ret == -1 && rte_errno == ERANGE) {
/* Too early pkts should be transmitted out directly */
- LOG_DEBUG(REORDERAPP, "%s():Cannot reorder early packet "
+ RTE_LOG(DEBUG, REORDERAPP,
+ "%s():Cannot reorder early packet "
"direct enqueuing to TX\n", __func__);
outp = mbufs[i]->port;
if ((portmask & (1 << outp)) == 0) {
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index f97d552..9af030d 100644
--- a/examples/tep_termination/main.c
+++ b/examples/tep_termination/main.c
@@ -568,7 +568,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
const uint16_t lcore_id = rte_lcore_id();
struct virtio_net *dev = vdev->dev;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n",
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: MAC address is external\n",
dev->device_fh);
/* Add packet to the port tx queue */
@@ -651,7 +651,7 @@ switch_worker(__rte_unused void *arg)
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA, "TX queue drained after "
+ RTE_LOG(DEBUG, VHOST_DATA, "TX queue drained after "
"timeout with burst size %u\n",
tx_q->len);
ret = overlay_options.tx_handle(ports[0],
@@ -1220,9 +1220,6 @@ main(int argc, char *argv[])
for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
vpool_array[queue_id].pool = mbuf_pool;
- /* Set log level. */
- rte_set_log_level(LOG_LEVEL);
-
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
diff --git a/examples/tep_termination/main.h b/examples/tep_termination/main.h
index a34301a..4b123ab 100644
--- a/examples/tep_termination/main.h
+++ b/examples/tep_termination/main.h
@@ -36,14 +36,6 @@
#include <rte_ether.h>
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#endif
-
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 28c17af..78fd1ab 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -877,7 +877,7 @@ static unsigned check_ports_num(unsigned nb_ports)
* Macro to print out packet contents. Wrapped in debug define so that the
* data path is not effected when debug is disabled.
*/
-#ifdef DEBUG
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
#define PRINT_PACKET(device, addr, size, header) do { \
char *pkt_addr = (char*)(addr); \
unsigned int index; \
@@ -893,7 +893,7 @@ static unsigned check_ports_num(unsigned nb_ports)
} \
snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
\
- LOG_DEBUG(VHOST_DATA, "%s", packet); \
+ RTE_LOG(DEBUG, VHOST_DATA, "%s", packet); \
} while(0)
#else
#define PRINT_PACKET(device, addr, size, header) do{} while(0)
@@ -927,7 +927,7 @@ gpa_to_hpa(struct vhost_dev *vdev, uint64_t guest_pa,
}
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| HPA %p\n",
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") GPA %p| HPA %p\n",
vdev->dev->device_fh, (void *)(uintptr_t)guest_pa,
(void *)(uintptr_t)vhost_pa);
@@ -1059,18 +1059,22 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
/* Drop the packet if the TX packet is destined for the TX device. */
if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
- dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "Source and destination MAC addresses are the same. "
+ "Dropping packet.\n",
+ dev->device_fh);
return 0;
}
tdev = dev_ll->vdev->dev;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", tdev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "MAC address is local\n", tdev->device_fh);
if (unlikely(dev_ll->vdev->remove)) {
/*drop the packet if the device is marked for removal*/
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", tdev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") "
+ "Device is marked for removal\n", tdev->device_fh);
} else {
/*send the packet to the local virtio device*/
ret = rte_vhost_enqueue_burst(tdev, VIRTIO_RXQ, &m, 1);
@@ -1114,7 +1118,7 @@ find_local_dest(struct virtio_net *dev, struct rte_mbuf *m,
* destined for the TX device.
*/
if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") TX: Source and destination"
" MAC addresses are the same. Dropping "
"packet.\n",
@@ -1132,7 +1136,7 @@ find_local_dest(struct virtio_net *dev, struct rte_mbuf *m,
(uint16_t)
vlan_tags[(uint16_t)dev_ll->vdev->dev->device_fh];
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") TX: pkt to local VM device id:"
"(%"PRIu64") vlan tag: %d.\n",
dev->device_fh, dev_ll->vdev->dev->device_fh,
@@ -1200,7 +1204,8 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
}
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "MAC address is external\n", dev->device_fh);
/*Add packet to the port tx queue*/
tx_q = &lcore_tx_queue[lcore_id];
@@ -1310,7 +1315,9 @@ switch_worker(__attribute__((unused)) void *arg)
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "TX queue drained after timeout with burst size %u\n",
+ tx_q->len);
/*Tx any packets in the queue*/
ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
@@ -1444,7 +1451,7 @@ get_available_ring_index_zcp(struct virtio_net *dev,
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
free_entries = (avail_idx - *res_base_idx);
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") in get_available_ring_index_zcp: "
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") in get_available_ring_index_zcp: "
"avail idx: %d, "
"res base idx:%d, free entries:%d\n",
dev->device_fh, avail_idx, *res_base_idx,
@@ -1469,7 +1476,7 @@ get_available_ring_index_zcp(struct virtio_net *dev,
count = free_entries;
if (unlikely(count == 0)) {
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") Fail in get_available_ring_index_zcp: "
"avail idx: %d, res base idx:%d, free entries:%d\n",
dev->device_fh, avail_idx,
@@ -1568,7 +1575,7 @@ attach_rxmbuf_zcp(struct virtio_net *dev)
rte_ring_sc_dequeue(vpool->ring, &obj);
mbuf = obj;
if (unlikely(mbuf == NULL)) {
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in attach_rxmbuf_zcp: "
"ring_sc_dequeue fail.\n",
dev->device_fh);
@@ -1577,7 +1584,7 @@ attach_rxmbuf_zcp(struct virtio_net *dev)
}
if (unlikely(vpool->buf_size > desc->len)) {
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in attach_rxmbuf_zcp: frame buffer "
"length(%d) of descriptor idx: %d less than room "
"size required: %d\n",
@@ -1593,7 +1600,7 @@ attach_rxmbuf_zcp(struct virtio_net *dev)
mbuf->data_len = desc->len;
MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in attach_rxmbuf_zcp: res base idx:%d, "
"descriptor idx:%d\n",
dev->device_fh, res_base_idx, desc_idx);
@@ -1644,11 +1651,11 @@ txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool)
uint32_t index = 0;
uint32_t mbuf_count = rte_mempool_count(vpool->pool);
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool before "
"clean is: %d\n",
dev->device_fh, mbuf_count);
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring before "
"clean is : %d\n",
dev->device_fh, rte_ring_count(vpool->ring));
@@ -1666,22 +1673,22 @@ txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool)
used_idx = (used_idx + 1) & (vq->size - 1);
}
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool after "
"clean is: %d\n",
dev->device_fh, rte_mempool_count(vpool->pool));
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring after "
"clean is : %d\n",
dev->device_fh, rte_ring_count(vpool->ring));
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: before updated "
"vq->last_used_idx:%d\n",
dev->device_fh, vq->last_used_idx);
vq->last_used_idx += mbuf_count;
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: after updated "
"vq->last_used_idx:%d\n",
dev->device_fh, vq->last_used_idx);
@@ -1706,11 +1713,11 @@ static void mbuf_destroy_zcp(struct vpool *vpool)
struct rte_mbuf *mbuf = NULL;
uint32_t index, mbuf_count = rte_mempool_count(vpool->pool);
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in mbuf_destroy_zcp: mbuf count in mempool before "
"mbuf_destroy_zcp is: %d\n",
mbuf_count);
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in mbuf_destroy_zcp: mbuf count in ring before "
"mbuf_destroy_zcp is : %d\n",
rte_ring_count(vpool->ring));
@@ -1724,11 +1731,11 @@ static void mbuf_destroy_zcp(struct vpool *vpool)
}
}
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in mbuf_destroy_zcp: mbuf count in mempool after "
"mbuf_destroy_zcp is: %d\n",
rte_mempool_count(vpool->pool));
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in mbuf_destroy_zcp: mbuf count in ring after "
"mbuf_destroy_zcp is : %d\n",
rte_ring_count(vpool->ring));
@@ -1752,7 +1759,8 @@ virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
uint32_t head_idx, packet_success = 0;
uint16_t res_cur_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_rx()\n",
+ dev->device_fh);
if (count == 0)
return 0;
@@ -1761,7 +1769,7 @@ virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
res_cur_idx = vq->last_used_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Current Index %d| End Index %d\n",
dev->device_fh, res_cur_idx, res_cur_idx + count);
/* Retrieve all of the head indexes first to avoid caching issues. */
@@ -1776,7 +1784,7 @@ virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
desc = &vq->desc[head[packet_success]];
buff = pkts[packet_success];
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in dev_rx_zcp: update the used idx for "
"pkt[%d] descriptor idx: %d\n",
dev->device_fh, packet_success,
@@ -1825,7 +1833,7 @@ virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
rte_compiler_barrier();
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in dev_rx_zcp: before update used idx: "
"vq.last_used_idx: %d, vq->used->idx: %d\n",
dev->device_fh, vq->last_used_idx, vq->used->idx);
@@ -1833,7 +1841,7 @@ virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
*(volatile uint16_t *)&vq->used->idx += count;
vq->last_used_idx += count;
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in dev_rx_zcp: after update used idx: "
"vq.last_used_idx: %d, vq->used->idx: %d\n",
dev->device_fh, vq->last_used_idx, vq->used->idx);
@@ -1918,7 +1926,7 @@ virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
tx_q->m_table[len] = mbuf;
len++;
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"(%"PRIu64") in tx_route_zcp: pkt: nb_seg: %d, next:%s\n",
dev->device_fh,
mbuf->nb_segs,
@@ -1980,7 +1988,8 @@ virtio_dev_tx_zcp(struct virtio_net *dev)
if (vq->last_used_idx_res == avail_idx)
return;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_tx()\n",
+ dev->device_fh);
/* Prefetch available ring to retrieve head indexes. */
rte_prefetch0(&vq->avail->ring[vq->last_used_idx_res & (vq->size - 1)]);
@@ -1992,7 +2001,7 @@ virtio_dev_tx_zcp(struct virtio_net *dev)
free_entries
= (free_entries > MAX_PKT_BURST) ? MAX_PKT_BURST : free_entries;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Buffers available %d\n",
dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
@@ -2129,7 +2138,7 @@ switch_worker_zcp(__attribute__((unused)) void *arg)
if (likely(!vdev->remove)) {
tx_q = &tx_queue_zcp[(uint16_t)vdev->vmdq_rx_q];
if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA,
+ RTE_LOG(DEBUG, VHOST_DATA,
"TX queue drained after timeout"
" with burst size %u\n",
tx_q->len);
@@ -2465,14 +2474,14 @@ destroy_device (volatile struct virtio_net *dev)
/* Stop the RX queue. */
if (rte_eth_dev_rx_queue_stop(ports[0], vdev->vmdq_rx_q) != 0) {
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%"PRIu64") In destroy_device: Failed to stop "
"rx queue:%d\n",
dev->device_fh,
vdev->vmdq_rx_q);
}
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%"PRIu64") in destroy_device: Start put mbuf in "
"mempool back to ring for RX queue: %d\n",
dev->device_fh, vdev->vmdq_rx_q);
@@ -2481,7 +2490,7 @@ destroy_device (volatile struct virtio_net *dev)
/* Stop the TX queue. */
if (rte_eth_dev_tx_queue_stop(ports[0], vdev->vmdq_rx_q) != 0) {
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%"PRIu64") In destroy_device: Failed to "
"stop tx queue:%d\n",
dev->device_fh, vdev->vmdq_rx_q);
@@ -2489,7 +2498,7 @@ destroy_device (volatile struct virtio_net *dev)
vpool = &vpool_array[vdev->vmdq_rx_q + MAX_QUEUES];
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%"PRIu64") destroy_device: Start put mbuf in mempool "
"back to ring for TX queue: %d, dev:(%"PRIu64")\n",
dev->device_fh, (vdev->vmdq_rx_q + MAX_QUEUES),
@@ -2513,14 +2522,14 @@ check_hpa_regions(uint64_t vva_start, uint64_t size)
uint32_t i, nregions = 0, page_size = getpagesize();
uint64_t cur_phys_addr = 0, next_phys_addr = 0;
if (vva_start % page_size) {
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in check_countinous: vva start(%p) mod page_size(%d) "
"has remainder\n",
(void *)(uintptr_t)vva_start, page_size);
return 0;
}
if (size % page_size) {
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in check_countinous: "
"size((%"PRIu64")) mod page_size(%d) has remainder\n",
size, page_size);
@@ -2533,13 +2542,13 @@ check_hpa_regions(uint64_t vva_start, uint64_t size)
(void *)(uintptr_t)(vva_start + i + page_size));
if ((cur_phys_addr + page_size) != next_phys_addr) {
++nregions;
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in check_continuous: hva addr:(%p) is not "
"continuous with hva addr:(%p), diff:%d\n",
(void *)(uintptr_t)(vva_start + (uint64_t)i),
(void *)(uintptr_t)(vva_start + (uint64_t)i
+ page_size), page_size);
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in check_continuous: hpa addr:(%p) is not "
"continuous with hpa addr:(%p), "
"diff:(%"PRIu64")\n",
@@ -2574,12 +2583,12 @@ fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct
mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
rte_mem_virt2phy((void *)(uintptr_t)(vva_start)) -
mem_region_hpa[regionidx_hpa].guest_phys_address;
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in fill_hpa_regions: guest phys addr start[%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address));
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in fill_hpa_regions: host phys addr start[%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
@@ -2599,12 +2608,12 @@ fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct
k + page_size;
mem_region_hpa[regionidx_hpa].memory_size
= k + page_size;
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest "
+ RTE_LOG(DEBUG, VHOST_CONFIG, "in fill_hpa_regions: guest "
"phys addr end [%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in fill_hpa_regions: guest phys addr "
"size [%d]:(%p)\n",
regionidx_hpa,
@@ -2616,12 +2625,12 @@ fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct
mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
next_phys_addr -
mem_region_hpa[regionidx_hpa].guest_phys_address;
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest"
+ RTE_LOG(DEBUG, VHOST_CONFIG, "in fill_hpa_regions: guest"
" phys addr start[%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address));
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in fill_hpa_regions: host phys addr "
"start[%d]:(%p)\n",
regionidx_hpa,
@@ -2636,11 +2645,11 @@ fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct
= mem_region_hpa[regionidx_hpa].guest_phys_address
+ k + page_size;
mem_region_hpa[regionidx_hpa].memory_size = k + page_size;
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end "
+ RTE_LOG(DEBUG, VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end "
"[%d]:(%p)\n", regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
+ RTE_LOG(DEBUG, VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
"[%d]:(%p)\n", regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].memory_size));
@@ -2730,12 +2739,12 @@ new_device (struct virtio_net *dev)
count_in_ring = rte_ring_count(vpool_array[index].ring);
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%"PRIu64") in new_device: mbuf count in mempool "
"before attach is: %d\n",
dev->device_fh,
rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%"PRIu64") in new_device: mbuf count in ring "
"before attach is : %d\n",
dev->device_fh, count_in_ring);
@@ -2746,12 +2755,12 @@ new_device (struct virtio_net *dev)
for (i = 0; i < count_in_ring; i++)
attach_rxmbuf_zcp(dev);
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
- "mempool after attach is: %d\n",
+ RTE_LOG(DEBUG, VHOST_CONFIG, "(%" PRIu64 ") in new_device: "
+ "mbuf count in mempool after attach is: %d\n",
dev->device_fh,
rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
- "ring after attach is : %d\n",
+ RTE_LOG(DEBUG, VHOST_CONFIG, "(%" PRIu64 ") in new_device: "
+ "mbuf count in ring after attach is : %d\n",
dev->device_fh,
rte_ring_count(vpool_array[index].ring));
@@ -2761,7 +2770,7 @@ new_device (struct virtio_net *dev)
if (rte_eth_dev_tx_queue_start(ports[0], vdev->vmdq_rx_q) != 0) {
struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%"PRIu64") In new_device: Failed to start "
"tx queue:%d\n",
dev->device_fh, vdev->vmdq_rx_q);
@@ -2775,7 +2784,7 @@ new_device (struct virtio_net *dev)
if (rte_eth_dev_rx_queue_start(ports[0], vdev->vmdq_rx_q) != 0) {
struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%"PRIu64") In new_device: Failed to start "
"rx queue:%d\n",
dev->device_fh, vdev->vmdq_rx_q);
@@ -2783,7 +2792,7 @@ new_device (struct virtio_net *dev)
/* Stop the TX queue. */
if (rte_eth_dev_tx_queue_stop(ports[0],
vdev->vmdq_rx_q) != 0) {
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%"PRIu64") In new_device: Failed to "
"stop tx queue:%d\n",
dev->device_fh, vdev->vmdq_rx_q);
@@ -2919,11 +2928,11 @@ setup_mempool_tbl(int socket, uint32_t index, char *pool_name,
rte_align32pow2(nb_mbuf + 1),
socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
if (likely(vpool_array[index].ring != NULL)) {
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in setup_mempool_tbl: mbuf count in "
"mempool is: %d\n",
rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in setup_mempool_tbl: mbuf count in "
"ring is: %d\n",
rte_ring_count(vpool_array[index].ring));
@@ -3021,7 +3030,7 @@ main(int argc, char *argv[])
if (vm2vm_mode == VM2VM_HARDWARE) {
/* Enable VT loop back to let L2 switch to do it. */
vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"Enable loop back for L2 switch in vmdq.\n");
}
} else {
@@ -3059,12 +3068,10 @@ main(int argc, char *argv[])
if (vm2vm_mode == VM2VM_HARDWARE) {
/* Enable VT loop back to let L2 switch to do it. */
vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"Enable loop back for L2 switch in vmdq.\n");
}
}
- /* Set log level. */
- rte_set_log_level(LOG_LEVEL);
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
@@ -3126,10 +3133,10 @@ main(int argc, char *argv[])
(void *)mbuf);
}
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in main: mbuf count in mempool at initial "
"is: %d\n", count_in_mempool);
- LOG_DEBUG(VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"in main: mbuf count in ring at initial is :"
" %d\n",
rte_ring_count(vpool_array[index].ring));
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index d04e2be..4f5ccf5 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -34,18 +34,6 @@
#ifndef _MAIN_H_
#define _MAIN_H_
-//#define DEBUG
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) do { \
- RTE_LOG(DEBUG, log_type, fmt, ##args); \
-} while (0)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do{} while(0)
-#endif
-
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c
index d83138d..8018677 100644
--- a/examples/vhost_xen/main.c
+++ b/examples/vhost_xen/main.c
@@ -507,32 +507,6 @@ static unsigned check_ports_num(unsigned nb_ports)
}
/*
- * Macro to print out packet contents. Wrapped in debug define so that the
- * data path is not effected when debug is disabled.
- */
-#ifdef DEBUG
-#define PRINT_PACKET(device, addr, size, header) do { \
- char *pkt_addr = (char*)(addr); \
- unsigned int index; \
- char packet[MAX_PRINT_BUFF]; \
- \
- if ((header)) \
- snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
- else \
- snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
- for (index = 0; index < (size); index++) { \
- snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
- "%02hhx ", pkt_addr[index]); \
- } \
- snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
- \
- LOG_DEBUG(VHOST_DATA, "%s", packet); \
-} while(0)
-#else
-#define PRINT_PACKET(device, addr, size, header) do{} while(0)
-#endif
-
-/*
* Function to convert guest physical addresses to vhost virtual addresses. This
* is used to convert virtio buffer addresses.
*/
@@ -551,7 +525,7 @@ gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
break;
}
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| VVA %p\n",
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") GPA %p| VVA %p\n",
dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
return vhost_va;
@@ -581,7 +555,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
uint8_t success = 0;
void *userdata;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_rx()\n", dev->device_fh);
vq = dev->virtqueue_rx;
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
/* As many data cores may want access to available buffers, they need to be reserved. */
@@ -606,7 +580,8 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
res_end_idx);
} while (unlikely(success == 0));
res_cur_idx = res_base_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Current Index %d| End Index %d\n",
+ dev->device_fh, res_cur_idx, res_end_idx);
/* Prefetch available ring to retrieve indexes. */
rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
@@ -800,17 +775,22 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
/* Drop the packet if the TX packet is destined for the TX device. */
if (dev_ll->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
- dev_ll->dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "Source and destination MAC addresses are the same. "
+ "Dropping packet.\n",
+ dev_ll->dev->device_fh);
return 0;
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "MAC address is local\n", dev_ll->dev->device_fh);
if (dev_ll->dev->remove) {
/*drop the packet if the device is marked for removal*/
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") "
+ "Device is marked for removal\n",
+ dev_ll->dev->device_fh);
} else {
/*send the packet to the local virtio device*/
ret = virtio_dev_rx(dev_ll->dev, &m, 1);
@@ -849,7 +829,8 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
return;
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "MAC address is external\n", dev->device_fh);
/*Add packet to the port tx queue*/
tx_q = &lcore_tx_queue[lcore_id];
@@ -922,7 +903,8 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
if (vq->last_used_idx == avail_idx)
return;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_tx()\n",
+ dev->device_fh);
/* Prefetch available ring to retrieve head indexes. */
rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
@@ -931,7 +913,8 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
free_entries = avail_idx - vq->last_used_idx;
free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Buffers available %d\n",
+ dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < free_entries; i++)
head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
@@ -1020,7 +1003,9 @@ switch_worker(__attribute__((unused)) void *arg)
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "TX queue drained after timeout with burst size %u\n",
+ tx_q->len);
/*Tx any packets in the queue*/
ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
@@ -1482,9 +1467,6 @@ main(int argc, char *argv[])
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
- /* Set log level. */
- rte_set_log_level(LOG_LEVEL);
-
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
diff --git a/examples/vhost_xen/main.h b/examples/vhost_xen/main.h
index 481572e..5ff48fd 100644
--- a/examples/vhost_xen/main.h
+++ b/examples/vhost_xen/main.h
@@ -34,17 +34,6 @@
#ifndef _MAIN_H_
#define _MAIN_H_
-//#define DEBUG
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) \
- RTE_LOG(DEBUG, log_type, fmt, ##args)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do{} while(0)
-#endif
-
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
--
2.7.0
* [dpdk-dev] [PATCH 4/4] eal: add assert macro for debug
2016-04-22 13:43 [dpdk-dev] [PATCH 0/4] cleanup debug and dead code Thomas Monjalon
` (2 preceding siblings ...)
2016-04-22 13:43 ` [dpdk-dev] [PATCH 3/4] examples: remove useless debug flags Thomas Monjalon
@ 2016-04-22 13:44 ` Thomas Monjalon
2016-04-22 19:51 ` Yuanhan Liu
2016-05-02 13:37 ` [dpdk-dev] [PATCH 0/4] cleanup debug and dead code Thomas Monjalon
4 siblings, 1 reply; 12+ messages in thread
From: Thomas Monjalon @ 2016-04-22 13:44 UTC (permalink / raw)
To: dev
The macro RTE_VERIFY always checks a condition.
It is optimized with an "unlikely" hint.
While this macro is well suited for test applications, libraries and
examples prefer to enable such checks only in debug mode.
That is why the macro RTE_ASSERT is introduced: it calls RTE_VERIFY only
when built with debug logs enabled.
Many assert macros were duplicated and enabled with a specific flag.
Removing these #ifdef blocks makes these code branches easier to test
and avoids dead code pitfalls.
ENA_ASSERT is kept (in debug mode only) because it has more
parameters to log.
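The intended split between the two macros looks like this (a minimal
sketch; the conditions shown are only illustrative):

	/* always-on check, kept even in release builds */
	RTE_VERIFY(ring != NULL);

	/* debug-only sanity check, compiled out unless
	 * RTE_LOG_LEVEL >= RTE_LOG_DEBUG
	 */
	RTE_ASSERT(port->rx_ring == NULL);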
Signed-off-by: Thomas Monjalon <thomas.monjalon@6wind.com>
---
doc/guides/sample_app_ug/ipv4_multicast.rst | 4 ++--
drivers/net/bonding/rte_eth_bond_8023ad.c | 13 +++++-----
drivers/net/bonding/rte_eth_bond_api.c | 4 ++--
drivers/net/bonding/rte_eth_bond_pmd.c | 4 ++--
drivers/net/ena/base/ena_plat_dpdk.h | 10 +++++---
drivers/net/enic/enic.h | 10 --------
drivers/net/enic/enic_rx.c | 4 ++--
drivers/net/vmxnet3/vmxnet3_ethdev.h | 8 -------
drivers/net/vmxnet3/vmxnet3_rxtx.c | 16 ++++++-------
drivers/net/xenvirt/rte_mempool_gntalloc.c | 2 +-
examples/ipsec-secgw/esp.c | 28 +++++++++++-----------
examples/ipsec-secgw/ipip.h | 10 ++++----
examples/ipsec-secgw/ipsec.c | 6 ++---
examples/ipsec-secgw/ipsec.h | 9 -------
examples/ipv4_multicast/main.c | 4 ++--
examples/performance-thread/common/lthread.c | 2 +-
examples/performance-thread/common/lthread_int.h | 12 ----------
examples/performance-thread/common/lthread_mutex.c | 3 +--
examples/performance-thread/common/lthread_pool.h | 4 ++--
examples/performance-thread/common/lthread_queue.h | 2 +-
examples/performance-thread/common/lthread_sched.c | 2 +-
examples/performance-thread/common/lthread_tls.c | 4 ++--
lib/librte_eal/common/include/rte_debug.h | 9 ++++++-
lib/librte_ip_frag/ip_frag_common.h | 8 -------
lib/librte_ip_frag/rte_ipv4_fragmentation.c | 2 +-
lib/librte_ip_frag/rte_ipv6_fragmentation.c | 2 +-
lib/librte_mbuf/rte_mbuf.c | 10 ++++----
lib/librte_mbuf/rte_mbuf.h | 23 ++++++------------
lib/librte_mempool/rte_mempool.c | 4 ++--
lib/librte_sched/rte_red.c | 2 +-
lib/librte_sched/rte_red.h | 25 +++++--------------
31 files changed, 94 insertions(+), 152 deletions(-)
diff --git a/doc/guides/sample_app_ug/ipv4_multicast.rst b/doc/guides/sample_app_ug/ipv4_multicast.rst
index 67ea944..72da8c4 100644
--- a/doc/guides/sample_app_ug/ipv4_multicast.rst
+++ b/doc/guides/sample_app_ug/ipv4_multicast.rst
@@ -193,7 +193,7 @@ Firstly, the Ethernet* header is removed from the packet and the IPv4 address is
/* Remove the Ethernet header from the input packet */
iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, sizeof(struct ether_hdr));
- RTE_MBUF_ASSERT(iphdr != NULL);
+ RTE_ASSERT(iphdr != NULL);
dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
Then, the packet is checked to see if it has a multicast destination address and
@@ -271,7 +271,7 @@ The actual packet transmission is done in the mcast_send_pkt() function:
ethdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t) sizeof(*ethdr));
- RTE_MBUF_ASSERT(ethdr != NULL);
+ RTE_ASSERT(ethdr != NULL);
ether_addr_copy(dest_addr, &ethdr->d_addr);
ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 8b4db50..c0ed44d 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -512,7 +512,7 @@ mux_machine(struct bond_dev_private *internals, uint8_t slave_id)
if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
/* attach mux to aggregator */
- RTE_VERIFY((port->actor_state & (STATE_COLLECTING |
+ RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
STATE_DISTRIBUTING)) == 0);
ACTOR_STATE_SET(port, SYNCHRONIZATION);
@@ -813,7 +813,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
struct lacpdu_header *lacp;
lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
- RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+ RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
/* This is LACP frame so pass it to rx_machine */
rx_machine(internals, slave_id, &lacp->lacpdu);
@@ -856,8 +856,9 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
uint16_t q_id;
/* Given slave mus not be in active list */
- RTE_VERIFY(find_slave_by_id(internals->active_slaves,
+ RTE_ASSERT(find_slave_by_id(internals->active_slaves,
internals->active_slave_count, slave_id) == internals->active_slave_count);
+ RTE_SET_USED(internals); /* used only for assert when enabled */
memcpy(&port->actor, &initial, sizeof(struct port_params));
/* Standard requires that port ID must be grater than 0.
@@ -880,8 +881,8 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
if (port->mbuf_pool != NULL)
return;
- RTE_VERIFY(port->rx_ring == NULL);
- RTE_VERIFY(port->tx_ring == NULL);
+ RTE_ASSERT(port->rx_ring == NULL);
+ RTE_ASSERT(port->tx_ring == NULL);
socket_id = rte_eth_devices[slave_id].data->numa_node;
element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf)
@@ -939,7 +940,7 @@ bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
uint8_t i;
/* Given slave must be in active list */
- RTE_VERIFY(find_slave_by_id(internals->active_slaves,
+ RTE_ASSERT(find_slave_by_id(internals->active_slaves,
internals->active_slave_count, slave_id) < internals->active_slave_count);
/* Exclude slave from transmit policy. If this slave is an aggregator
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index e9247b5..d3bda35 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -95,7 +95,7 @@ activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
internals->tlb_slaves_order[active_count] = port_id;
}
- RTE_VERIFY(internals->active_slave_count <
+ RTE_ASSERT(internals->active_slave_count <
(RTE_DIM(internals->active_slaves) - 1));
internals->active_slaves[internals->active_slave_count] = port_id;
@@ -134,7 +134,7 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
sizeof(internals->active_slaves[0]));
}
- RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
+ RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
internals->active_slave_count = active_count;
if (eth_dev->data->dev_started) {
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index c897146..129f04b 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1608,11 +1608,11 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
for (i = 0; i < internals->active_slave_count; i++) {
port = &mode_8023ad_ports[internals->active_slaves[i]];
- RTE_VERIFY(port->rx_ring != NULL);
+ RTE_ASSERT(port->rx_ring != NULL);
while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
rte_pktmbuf_free(pkt);
- RTE_VERIFY(port->tx_ring != NULL);
+ RTE_ASSERT(port->tx_ring != NULL);
while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
rte_pktmbuf_free(pkt);
}
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index aab2ac8..5f69330 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -93,14 +93,18 @@ typedef uint64_t dma_addr_t;
#define ENA_GET_SYSTEM_USECS() \
(rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz())
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
#define ENA_ASSERT(cond, format, arg...) \
do { \
if (unlikely(!(cond))) { \
- printf("Assertion failed on %s:%s:%d: " format, \
- __FILE__, __func__, __LINE__, ##arg); \
- rte_exit(EXIT_FAILURE, "ASSERTION FAILED\n"); \
+ RTE_LOG(ERR, PMD, format, ##arg); \
+ rte_panic("line %d\tassert \"" #cond "\"" \
+ "failed\n", __LINE__); \
} \
} while (0)
+#else
+#define ENA_ASSERT(cond, format, arg...) do {} while (0)
+#endif
#define ENA_MAX32(x, y) RTE_MAX((x), (y))
#define ENA_MAX16(x, y) RTE_MAX((x), (y))
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 8c914f5..09f3853 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -155,16 +155,6 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
return (struct enic *)eth_dev->data->dev_private;
}
-#define RTE_LIBRTE_ENIC_ASSERT_ENABLE
-#ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
-#define ASSERT(x) do { \
- if (!(x)) \
- rte_panic("ENIC: x"); \
-} while (0)
-#else
-#define ASSERT(x)
-#endif
-
extern void enic_fdir_stats_get(struct enic *enic,
struct rte_eth_fdir_stats *stats);
extern int enic_fdir_add_fltr(struct enic *enic,
diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
index 232987a..b3ad9ea 100644
--- a/drivers/net/enic/enic_rx.c
+++ b/drivers/net/enic/enic_rx.c
@@ -238,8 +238,8 @@ static inline uint32_t
enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
uint32_t d = i0 + i1;
- ASSERT(i0 < n_descriptors);
- ASSERT(i1 < n_descriptors);
+ RTE_ASSERT(i0 < n_descriptors);
+ RTE_ASSERT(i1 < n_descriptors);
d -= (d >= n_descriptors) ? n_descriptors : 0;
return d;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 4f9d0bd..1be833a 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -34,14 +34,6 @@
#ifndef _VMXNET3_ETHDEV_H_
#define _VMXNET3_ETHDEV_H_
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-#define VMXNET3_ASSERT(x) do { \
- if (!(x)) rte_panic("VMXNET3: %s\n", #x); \
-} while(0)
-#else
-#define VMXNET3_ASSERT(x) do { (void)(x); } while (0)
-#endif
-
#define VMXNET3_MAX_MAC_ADDRS 1
/* UPT feature to negotiate */
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 4ac0456..b7486cd 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -296,7 +296,7 @@ vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
struct rte_mbuf *mbuf;
/* Release cmd_ring descriptor and free mbuf */
- VMXNET3_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
+ RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
mbuf = txq->cmd_ring.buf_info[eop_idx].m;
if (mbuf == NULL)
@@ -307,7 +307,7 @@ vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
while (txq->cmd_ring.next2comp != eop_idx) {
/* no out-of-order completion */
- VMXNET3_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
+ RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
completed++;
}
@@ -454,7 +454,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
if (tso) {
uint16_t mss = txm->tso_segsz;
- VMXNET3_ASSERT(mss > 0);
+ RTE_ASSERT(mss > 0);
gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
gdesc->txd.om = VMXNET3_OM_TSO;
@@ -662,8 +662,8 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
- VMXNET3_ASSERT(rcd->len <= rxd->len);
- VMXNET3_ASSERT(rbi->m);
+ RTE_ASSERT(rcd->len <= rxd->len);
+ RTE_ASSERT(rbi->m);
/* Get the packet buffer pointer from buf_info */
rxm = rbi->m;
@@ -710,10 +710,10 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
* the last mbuf of the current packet.
*/
if (rcd->sop) {
- VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
+ RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
if (unlikely(rcd->len == 0)) {
- VMXNET3_ASSERT(rcd->eop);
+ RTE_ASSERT(rcd->eop);
PMD_RX_LOG(DEBUG,
"Rx buf was skipped. rxring[%d][%d])",
@@ -727,7 +727,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
} else {
struct rte_mbuf *start = rxq->start_seg;
- VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
+ RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
start->pkt_len += rxm->data_len;
start->nb_segs++;
diff --git a/drivers/net/xenvirt/rte_mempool_gntalloc.c b/drivers/net/xenvirt/rte_mempool_gntalloc.c
index 7bfbfda..e5c681e 100644
--- a/drivers/net/xenvirt/rte_mempool_gntalloc.c
+++ b/drivers/net/xenvirt/rte_mempool_gntalloc.c
@@ -202,7 +202,7 @@ _create_mempool(const char *name, unsigned elt_num, unsigned elt_size,
obj_init, obj_init_arg,
socket_id, flags, va, pa_arr, rpg_num, pg_shift);
- RTE_VERIFY(elt_num == mp->size);
+ RTE_ASSERT(elt_num == mp->size);
}
mgi.mp = mp;
mgi.pg_num = rpg_num;
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 4611631..0f6b33e 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -58,7 +58,7 @@ random_iv_u64(uint64_t *buf, uint16_t n)
unsigned left = n & 0x7;
unsigned i;
- IPSEC_ASSERT((n & 0x3) == 0);
+ RTE_ASSERT((n & 0x3) == 0);
for (i = 0; i < (n >> 3); i++)
buf[i] = rte_rand();
@@ -75,9 +75,9 @@ esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
int32_t payload_len;
struct rte_crypto_sym_op *sym_cop;
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
payload_len = rte_pktmbuf_pkt_len(m) - IP_ESP_HDR_SZ - sa->iv_len -
sa->digest_len;
@@ -124,9 +124,9 @@ esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
uint8_t *padding;
uint16_t i;
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
@@ -165,9 +165,9 @@ esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
char *padding;
struct rte_crypto_sym_op *sym_cop;
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
/* Payload length */
pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) + 2,
@@ -186,7 +186,7 @@ esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
padding = rte_pktmbuf_append(m, pad_len + sa->digest_len);
- IPSEC_ASSERT(padding != NULL);
+ RTE_ASSERT(padding != NULL);
ip = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
sa->src, sa->dst);
@@ -238,9 +238,9 @@ esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m __rte_unused,
struct ipsec_sa *sa __rte_unused,
struct rte_crypto_op *cop)
{
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
diff --git a/examples/ipsec-secgw/ipip.h b/examples/ipsec-secgw/ipip.h
index 322076c..1307a12 100644
--- a/examples/ipsec-secgw/ipip.h
+++ b/examples/ipsec-secgw/ipip.h
@@ -49,13 +49,13 @@ ip4ip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t src, uint32_t dst)
inip = rte_pktmbuf_mtod(m, struct ip*);
- IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+ RTE_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
offset += sizeof(struct ip);
outip = (struct ip *)rte_pktmbuf_prepend(m, offset);
- IPSEC_ASSERT(outip != NULL);
+ RTE_ASSERT(outip != NULL);
/* Per RFC4301 5.1.2.1 */
outip->ip_v = IPVERSION;
@@ -83,14 +83,14 @@ ip4ip_inbound(struct rte_mbuf *m, uint32_t offset)
outip = rte_pktmbuf_mtod(m, struct ip*);
- IPSEC_ASSERT(outip->ip_v == IPVERSION);
+ RTE_ASSERT(outip->ip_v == IPVERSION);
offset += sizeof(struct ip);
inip = (struct ip *)rte_pktmbuf_adj(m, offset);
- IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+ RTE_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
/* Check packet is still bigger than IP header (inner) */
- IPSEC_ASSERT(rte_pktmbuf_pkt_len(m) > sizeof(struct ip));
+ RTE_ASSERT(rte_pktmbuf_pkt_len(m) > sizeof(struct ip));
/* RFC4301 5.1.2.1 Note 6 */
if ((inip->ip_tos & htons(IPTOS_ECN_ECT0 | IPTOS_ECN_ECT1)) &&
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 0654c06..4566d38 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -117,7 +117,7 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
sa = sas[i];
priv->sa = sa;
- IPSEC_ASSERT(sa != NULL);
+ RTE_ASSERT(sa != NULL);
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
@@ -139,7 +139,7 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
continue;
}
- IPSEC_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
+ RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
}
@@ -166,7 +166,7 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
priv = get_priv(pkt);
sa = priv->sa;
- IPSEC_ASSERT(sa != NULL);
+ RTE_ASSERT(sa != NULL);
ret = sa->post_crypto(pkt, sa, cops[j]);
if (unlikely(ret))
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 50037e2..e60fae6 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -47,15 +47,6 @@
#define MAX_PKT_BURST 32
#define MAX_QP_PER_LCORE 256
-#ifdef IPSEC_DEBUG
-#define IPSEC_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-#else
-#define IPSEC_ASSERT(exp) do {} while (0)
-#endif /* IPSEC_DEBUG */
-
#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */
#define uint32_t_to_char(ip, a, b, c, d) do {\
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 96b4157..f013d92 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -321,7 +321,7 @@ mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr,
/* Construct Ethernet header. */
ethdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t)sizeof(*ethdr));
- RTE_MBUF_ASSERT(ethdr != NULL);
+ RTE_ASSERT(ethdr != NULL);
ether_addr_copy(dest_addr, &ethdr->d_addr);
ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
@@ -353,7 +353,7 @@ mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
/* Remove the Ethernet header from the input packet */
iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
- RTE_MBUF_ASSERT(iphdr != NULL);
+ RTE_ASSERT(iphdr != NULL);
dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
diff --git a/examples/performance-thread/common/lthread.c b/examples/performance-thread/common/lthread.c
index 8fbff73..062275a 100644
--- a/examples/performance-thread/common/lthread.c
+++ b/examples/performance-thread/common/lthread.c
@@ -143,7 +143,7 @@ struct lthread_stack *_stack_alloc(void)
struct lthread_stack *s;
s = _lthread_objcache_alloc((THIS_SCHED)->stack_cache);
- LTHREAD_ASSERT(s != NULL);
+ RTE_ASSERT(s != NULL);
s->root_sched = THIS_SCHED;
s->stack_size = LTHREAD_MAX_STACK_SIZE;
diff --git a/examples/performance-thread/common/lthread_int.h b/examples/performance-thread/common/lthread_int.h
index c8357f4..b858b55 100644
--- a/examples/performance-thread/common/lthread_int.h
+++ b/examples/performance-thread/common/lthread_int.h
@@ -197,16 +197,4 @@ struct lthread {
uint64_t diag_ref; /* ref to user diag data */
} __rte_cache_aligned;
-/*
- * Assert
- */
-#if LTHREAD_DIAG
-#define LTHREAD_ASSERT(expr) do { \
- if (!(expr)) \
- rte_panic("line%d\tassert \"" #expr "\" failed\n", __LINE__);\
-} while (0)
-#else
-#define LTHREAD_ASSERT(expr) do {} while (0)
-#endif
-
#endif /* LTHREAD_INT_H */
diff --git a/examples/performance-thread/common/lthread_mutex.c b/examples/performance-thread/common/lthread_mutex.c
index af8b82d..c1bc627 100644
--- a/examples/performance-thread/common/lthread_mutex.c
+++ b/examples/performance-thread/common/lthread_mutex.c
@@ -170,7 +170,6 @@ int lthread_mutex_lock(struct lthread_mutex *m)
_suspend();
/* resumed, must loop and compete for the lock again */
}
- LTHREAD_ASSERT(0);
return 0;
}
@@ -231,7 +230,7 @@ int lthread_mutex_unlock(struct lthread_mutex *m)
if (unblocked != NULL) {
rte_atomic64_dec(&m->count);
DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
- LTHREAD_ASSERT(unblocked->sched != NULL);
+ RTE_ASSERT(unblocked->sched != NULL);
_ready_queue_insert((struct lthread_sched *)
unblocked->sched, unblocked);
break;
diff --git a/examples/performance-thread/common/lthread_pool.h b/examples/performance-thread/common/lthread_pool.h
index a5f3251..27680ea 100644
--- a/examples/performance-thread/common/lthread_pool.h
+++ b/examples/performance-thread/common/lthread_pool.h
@@ -138,14 +138,14 @@ _qnode_pool_create(const char *name, int prealloc_size) {
RTE_CACHE_LINE_SIZE,
rte_socket_id());
- LTHREAD_ASSERT(p);
+ RTE_ASSERT(p);
p->stub = rte_malloc_socket(NULL,
sizeof(struct qnode),
RTE_CACHE_LINE_SIZE,
rte_socket_id());
- LTHREAD_ASSERT(p->stub);
+ RTE_ASSERT(p->stub);
if (name != NULL)
strncpy(p->name, name, LT_MAX_NAME_SIZE);
diff --git a/examples/performance-thread/common/lthread_queue.h b/examples/performance-thread/common/lthread_queue.h
index 0c39516..2c55fce 100644
--- a/examples/performance-thread/common/lthread_queue.h
+++ b/examples/performance-thread/common/lthread_queue.h
@@ -129,7 +129,7 @@ _lthread_queue_create(const char *name)
/* allocated stub node */
stub = _qnode_alloc();
- LTHREAD_ASSERT(stub);
+ RTE_ASSERT(stub);
if (name != NULL)
strncpy(new_queue->name, name, sizeof(new_queue->name));
diff --git a/examples/performance-thread/common/lthread_sched.c b/examples/performance-thread/common/lthread_sched.c
index 7c40bc0..c64c21f 100644
--- a/examples/performance-thread/common/lthread_sched.c
+++ b/examples/performance-thread/common/lthread_sched.c
@@ -268,7 +268,7 @@ struct lthread_sched *_lthread_sched_create(size_t stack_size)
struct lthread_sched *new_sched;
unsigned lcoreid = rte_lcore_id();
- LTHREAD_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
+ RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
if (stack_size == 0)
stack_size = LTHREAD_MAX_STACK_SIZE;
diff --git a/examples/performance-thread/common/lthread_tls.c b/examples/performance-thread/common/lthread_tls.c
index 43cda4f..6876f83 100644
--- a/examples/performance-thread/common/lthread_tls.c
+++ b/examples/performance-thread/common/lthread_tls.c
@@ -94,7 +94,7 @@ void _lthread_key_pool_init(void)
pool = rte_ring_create(name,
LTHREAD_MAX_KEYS, 0, 0);
- LTHREAD_ASSERT(pool);
+ RTE_ASSERT(pool);
int i;
@@ -240,7 +240,7 @@ void _lthread_tls_alloc(struct lthread *lt)
tls = _lthread_objcache_alloc((THIS_SCHED)->tls_cache);
- LTHREAD_ASSERT(tls != NULL);
+ RTE_ASSERT(tls != NULL);
tls->root_sched = (THIS_SCHED);
lt->tls = tls;
diff --git a/lib/librte_eal/common/include/rte_debug.h b/lib/librte_eal/common/include/rte_debug.h
index 94129fa..9260eda 100644
--- a/lib/librte_eal/common/include/rte_debug.h
+++ b/lib/librte_eal/common/include/rte_debug.h
@@ -43,6 +43,8 @@
* the implementation is architecture-specific.
*/
+#include "rte_log.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -76,8 +78,13 @@ void rte_dump_registers(void);
#define rte_panic(...) rte_panic_(__func__, __VA_ARGS__, "dummy")
#define rte_panic_(func, format, ...) __rte_panic(func, format "%.0s", __VA_ARGS__)
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#define RTE_ASSERT(exp) RTE_VERIFY(exp)
+#else
+#define RTE_ASSERT(exp) do {} while (0)
+#endif
#define RTE_VERIFY(exp) do { \
- if (!(exp)) \
+ if (unlikely(!(exp))) \
rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__); \
} while (0)
diff --git a/lib/librte_ip_frag/ip_frag_common.h b/lib/librte_ip_frag/ip_frag_common.h
index cde6ed4..7076475 100644
--- a/lib/librte_ip_frag/ip_frag_common.h
+++ b/lib/librte_ip_frag/ip_frag_common.h
@@ -38,17 +38,9 @@
/* logging macros. */
#ifdef RTE_LIBRTE_IP_FRAG_DEBUG
-
#define IP_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
-
-#define IP_FRAG_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("function %s, line%d\tassert \"" #exp "\" failed\n", \
- __func__, __LINE__); \
-}
#else
#define IP_FRAG_LOG(lvl, fmt, args...) do {} while(0)
-#define IP_FRAG_ASSERT(exp) do {} while (0)
#endif /* IP_FRAG_DEBUG */
#define IPV4_KEYLEN 1
diff --git a/lib/librte_ip_frag/rte_ipv4_fragmentation.c b/lib/librte_ip_frag/rte_ipv4_fragmentation.c
index a4ed923..a2259e8 100644
--- a/lib/librte_ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/librte_ip_frag/rte_ipv4_fragmentation.c
@@ -107,7 +107,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));
/* Fragment size should be a multiply of 8. */
- IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
+ RTE_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
diff --git a/lib/librte_ip_frag/rte_ipv6_fragmentation.c b/lib/librte_ip_frag/rte_ipv6_fragmentation.c
index 1e30004..db666bb 100644
--- a/lib/librte_ip_frag/rte_ipv6_fragmentation.c
+++ b/lib/librte_ip_frag/rte_ipv6_fragmentation.c
@@ -110,7 +110,7 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
frag_size = (uint16_t)(mtu_size - sizeof(struct ipv6_hdr));
/* Fragment size should be a multiple of 8. */
- IP_FRAG_ASSERT((frag_size & ~RTE_IPV6_EHDR_FO_MASK) == 0);
+ RTE_ASSERT((frag_size & ~RTE_IPV6_EHDR_FO_MASK) == 0);
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely (frag_size * nb_pkts_out <
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index dc0467c..eec1456 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -86,7 +86,7 @@ rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
struct rte_pktmbuf_pool_private default_mbp_priv;
uint16_t roomsz;
- RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
+ RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
/* if no structure is provided, assume no mbuf private area */
user_mbp_priv = opaque_arg;
@@ -100,7 +100,7 @@ rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
user_mbp_priv = &default_mbp_priv;
}
- RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
+ RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
user_mbp_priv->mbuf_data_room_size +
user_mbp_priv->mbuf_priv_size);
@@ -126,9 +126,9 @@ rte_pktmbuf_init(struct rte_mempool *mp,
mbuf_size = sizeof(struct rte_mbuf) + priv_size;
buf_len = rte_pktmbuf_data_room_size(mp);
- RTE_MBUF_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
- RTE_MBUF_ASSERT(mp->elt_size >= mbuf_size);
- RTE_MBUF_ASSERT(buf_len <= UINT16_MAX);
+ RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
+ RTE_ASSERT(mp->elt_size >= mbuf_size);
+ RTE_ASSERT(buf_len <= UINT16_MAX);
memset(m, 0, mp->elt_size);
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 451921f..529debb 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -938,12 +938,6 @@ struct rte_pktmbuf_pool_private {
rte_mbuf_sanity_check(m, is_h); \
} while (0)
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
#else /* RTE_LIBRTE_MBUF_DEBUG */
/** check mbuf type in debug mode */
@@ -952,9 +946,6 @@ if (!(exp)) { \
/** check mbuf type in debug mode if mbuf pointer is not null */
#define __rte_mbuf_sanity_check_raw(m, is_h) do { } while (0)
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp) do { } while (0)
-
#endif /* RTE_LIBRTE_MBUF_DEBUG */
#ifdef RTE_MBUF_REFCNT_ATOMIC
@@ -1084,7 +1075,7 @@ static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
if (rte_mempool_get(mp, &mb) < 0)
return NULL;
m = (struct rte_mbuf *)mb;
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
rte_mbuf_refcnt_set(m, 1);
return m;
}
@@ -1100,7 +1091,7 @@ static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
static inline void __attribute__((always_inline))
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
rte_mempool_put(m->pool, m);
}
@@ -1388,22 +1379,22 @@ static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
switch (count % 4) {
case 0:
while (idx != count) {
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
case 3:
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
case 2:
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
case 1:
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
@@ -1431,7 +1422,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
struct rte_mbuf *md;
- RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(mi) &&
+ RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
rte_mbuf_refcnt_read(mi) == 1);
/* if m is not direct, get the mbuf that embeds the data */
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index f8781e1..70812d9 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -795,8 +795,8 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
unsigned common_count;
unsigned cache_count;
- RTE_VERIFY(f != NULL);
- RTE_VERIFY(mp != NULL);
+ RTE_ASSERT(f != NULL);
+ RTE_ASSERT(mp != NULL);
fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
fprintf(f, " flags=%x\n", mp->flags);
diff --git a/lib/librte_sched/rte_red.c b/lib/librte_sched/rte_red.c
index fdf4057..ade57d1 100644
--- a/lib/librte_sched/rte_red.c
+++ b/lib/librte_sched/rte_red.c
@@ -77,7 +77,7 @@ __rte_red_init_tables(void)
scale = 1024.0;
- RTE_RED_ASSERT(RTE_RED_WQ_LOG2_NUM == RTE_DIM(rte_red_log2_1_minus_Wq));
+ RTE_ASSERT(RTE_RED_WQ_LOG2_NUM == RTE_DIM(rte_red_log2_1_minus_Wq));
for (i = RTE_RED_WQ_LOG2_MIN; i <= RTE_RED_WQ_LOG2_MAX; i++) {
double n = (double)i;
diff --git a/lib/librte_sched/rte_red.h b/lib/librte_sched/rte_red.h
index 7f9ac90..ca12227 100644
--- a/lib/librte_sched/rte_red.h
+++ b/lib/librte_sched/rte_red.h
@@ -63,19 +63,6 @@ extern "C" {
#define RTE_RED_INT16_NBITS (sizeof(uint16_t) * CHAR_BIT)
#define RTE_RED_WQ_LOG2_NUM (RTE_RED_WQ_LOG2_MAX - RTE_RED_WQ_LOG2_MIN + 1)
-#ifdef RTE_RED_DEBUG
-
-#define RTE_RED_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
-#else
-
-#define RTE_RED_ASSERT(exp) do { } while(0)
-
-#endif /* RTE_RED_DEBUG */
-
/**
* Externs
*
@@ -246,8 +233,8 @@ rte_red_enqueue_empty(const struct rte_red_config *red_cfg,
{
uint64_t time_diff = 0, m = 0;
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
+ RTE_ASSERT(red_cfg != NULL);
+ RTE_ASSERT(red != NULL);
red->count ++;
@@ -361,8 +348,8 @@ rte_red_enqueue_nonempty(const struct rte_red_config *red_cfg,
struct rte_red *red,
const unsigned q)
{
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
+ RTE_ASSERT(red_cfg != NULL);
+ RTE_ASSERT(red != NULL);
/**
* EWMA filter (Sally Floyd and Van Jacobson):
@@ -424,8 +411,8 @@ rte_red_enqueue(const struct rte_red_config *red_cfg,
const unsigned q,
const uint64_t time)
{
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
+ RTE_ASSERT(red_cfg != NULL);
+ RTE_ASSERT(red != NULL);
if (q != 0) {
return rte_red_enqueue_nonempty(red_cfg, red, q);
--
2.7.0
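
For reference, a minimal usage sketch of the new macro (not part of the patch; the function
and field names below are illustrative only, assuming rte_debug.h as modified in the hunk
above): RTE_ASSERT is compiled out unless the build sets RTE_LOG_LEVEL >= RTE_LOG_DEBUG,
while RTE_VERIFY keeps its unconditional check and panics in every build.

#include <rte_debug.h>
#include <rte_mbuf.h>

static uint16_t
first_data_len(struct rte_mbuf *m)
{
	/* Debug-only check: expands to nothing unless the build sets
	 * RTE_LOG_LEVEL >= RTE_LOG_DEBUG, so release builds pay no cost. */
	RTE_ASSERT(m != NULL);

	/* Unconditional check: rte_panic()s on failure regardless of log level. */
	RTE_VERIFY(m->nb_segs >= 1);

	return rte_pktmbuf_data_len(m);
}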
^ permalink raw reply [flat|nested] 12+ messages in thread