From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by dpdk.org (Postfix) with ESMTP id D82FE8E5E for ; Fri, 15 Jan 2016 15:45:17 +0100 (CET) Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga102.jf.intel.com with ESMTP; 15 Jan 2016 06:45:13 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.22,299,1449561600"; d="scan'208";a="861192879" Received: from unknown (HELO Sent) ([10.217.248.171]) by orsmga001.jf.intel.com with SMTP; 15 Jan 2016 06:45:11 -0800 Received: by Sent (sSMTP sendmail emulation); Fri, 15 Jan 2016 15:44:49 +0100 From: Tomasz Kulasek To: dev@dpdk.org Date: Fri, 15 Jan 2016 15:43:58 +0100 Message-Id: <1452869038-9140-3-git-send-email-tomaszx.kulasek@intel.com> X-Mailer: git-send-email 2.1.4 In-Reply-To: <1452869038-9140-1-git-send-email-tomaszx.kulasek@intel.com> References: <1452869038-9140-1-git-send-email-tomaszx.kulasek@intel.com> Subject: [dpdk-dev] [PATCH 2/2] examples: sample apps rework to use buffered tx api X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Fri, 15 Jan 2016 14:45:18 -0000 The internal buffering of packets for TX in sample apps is no longer needed, so this patchset replaces this code with calls to the new rte_eth_tx_buffer* APIs in: * l2fwd-jobstats * l2fwd-keepalive * l2fwd * l3fwd-acl * l3fwd-power * link_status_interrupt * client_server_mp * l2fwd_fork * packet_ordering * qos_meter Signed-off-by: Bruce Richardson Signed-off-by: Tomasz Kulasek --- examples/l2fwd-jobstats/main.c | 73 +++++------------ examples/l2fwd-keepalive/main.c | 79 ++++--------------- examples/l2fwd/main.c | 80 ++++--------------- examples/l3fwd-acl/main.c | 64 ++------------- examples/l3fwd-power/main.c | 63 ++------------- examples/link_status_interrupt/main.c | 83 ++++---------------- .../client_server_mp/mp_client/client.c | 77 ++++++++---------- examples/multi_process/l2fwd_fork/main.c | 81 ++++--------------- examples/packet_ordering/main.c | 62 +++++++-------- examples/qos_meter/main.c | 46 ++--------- 10 files changed, 166 insertions(+), 542 deletions(-) diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c index 7b59f4e..9a6e6ea 100644 --- a/examples/l2fwd-jobstats/main.c +++ b/examples/l2fwd-jobstats/main.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -99,8 +99,6 @@ static unsigned int l2fwd_rx_queue_per_lcore = 1; struct mbuf_table { uint64_t next_flush_time; - unsigned len; - struct rte_mbuf *mbufs[MAX_PKT_BURST]; }; #define MAX_RX_QUEUE_PER_LCORE 16 @@ -373,58 +371,12 @@ show_stats_cb(__rte_unused void *param) rte_eal_alarm_set(timer_period * US_PER_S, show_stats_cb, NULL); } -/* Send the burst of packets on an output interface */ -static void -l2fwd_send_burst(struct lcore_queue_conf *qconf, uint8_t port) -{ - struct mbuf_table *m_table; - uint16_t ret; - uint16_t queueid = 0; - uint16_t n; - - m_table = &qconf->tx_mbufs[port]; - n = m_table->len; - - m_table->next_flush_time = rte_get_timer_cycles() + drain_tsc; - m_table->len = 0; - - ret = rte_eth_tx_burst(port, queueid, m_table->mbufs, n); - - port_statistics[port].tx += ret; - if (unlikely(ret < n)) { - port_statistics[port].dropped += (n - ret); - do { - rte_pktmbuf_free(m_table->mbufs[ret]); - } while (++ret < n); - } -} - -/* Enqueue packets for TX and prepare them to be sent */ -static int -l2fwd_send_packet(struct rte_mbuf *m, uint8_t port) -{ - const unsigned lcore_id = rte_lcore_id(); - struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id]; - struct mbuf_table *m_table = &qconf->tx_mbufs[port]; - uint16_t len = qconf->tx_mbufs[port].len; - - m_table->mbufs[len] = m; - - len++; - m_table->len = len; - - /* Enough pkts to be sent. */ - if (unlikely(len == MAX_PKT_BURST)) - l2fwd_send_burst(qconf, port); - - return 0; -} - static void l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) { struct ether_hdr *eth; void *tmp; + int sent; unsigned dst_port; dst_port = l2fwd_dst_ports[portid]; @@ -437,7 +389,9 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) /* src addr */ ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], ð->s_addr); - l2fwd_send_packet(m, (uint8_t) dst_port); + sent = rte_eth_tx_buffer(dst_port, 0, m); + if (sent) + port_statistics[dst_port].tx += sent; } static void @@ -513,6 +467,8 @@ l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg) struct lcore_queue_conf *qconf; struct mbuf_table *m_table; uint8_t portid; + unsigned i; + uint32_t sent; lcore_id = rte_lcore_id(); qconf = &lcore_queue_conf[lcore_id]; @@ -522,12 +478,19 @@ l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg) now = rte_get_timer_cycles(); lcore_id = rte_lcore_id(); qconf = &lcore_queue_conf[lcore_id]; - for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { - m_table = &qconf->tx_mbufs[portid]; - if (m_table->len == 0 || m_table->next_flush_time <= now) + + for (i = 0; i < qconf->n_rx_port; i++) { + m_table = &qconf->tx_mbufs[i]; + + if (m_table->next_flush_time <= now) continue; + m_table->next_flush_time = rte_get_timer_cycles() + drain_tsc; - l2fwd_send_burst(qconf, portid); + portid = qconf->rx_port_list[i]; + portid = l2fwd_dst_ports[portid]; + sent = rte_eth_tx_buffer_flush(portid, 0); + if (sent) + port_statistics[portid].tx += sent; } diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c index f4d52f2..b59ff6d 100644 --- a/examples/l2fwd-keepalive/main.c +++ b/examples/l2fwd-keepalive/main.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -97,17 +97,11 @@ static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS]; static unsigned int l2fwd_rx_queue_per_lcore = 1; -struct mbuf_table { - unsigned len; - struct rte_mbuf *m_table[MAX_PKT_BURST]; -}; - #define MAX_RX_QUEUE_PER_LCORE 16 #define MAX_TX_QUEUE_PER_PORT 16 struct lcore_queue_conf { unsigned n_rx_port; unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE]; - struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; } __rte_cache_aligned; struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; @@ -132,7 +126,7 @@ struct rte_mempool *l2fwd_pktmbuf_pool = NULL; struct l2fwd_port_statistics { uint64_t tx; uint64_t rx; - uint64_t dropped; + unsigned long dropped; } __rte_cache_aligned; struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS]; @@ -192,57 +186,12 @@ print_stats(__attribute__((unused)) struct rte_timer *ptr_timer, printf("\n====================================================\n"); } -/* Send the burst of packets on an output interface */ -static int -l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port) -{ - struct rte_mbuf **m_table; - unsigned ret; - unsigned queueid = 0; - - m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; - - ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n); - port_statistics[port].tx += ret; - if (unlikely(ret < n)) { - port_statistics[port].dropped += (n - ret); - do { - rte_pktmbuf_free(m_table[ret]); - } while (++ret < n); - } - - return 0; -} - -/* Enqueue packets for TX and prepare them to be sent */ -static int -l2fwd_send_packet(struct rte_mbuf *m, uint8_t port) -{ - unsigned lcore_id, len; - struct lcore_queue_conf *qconf; - - lcore_id = rte_lcore_id(); - - qconf = &lcore_queue_conf[lcore_id]; - len = qconf->tx_mbufs[port].len; - qconf->tx_mbufs[port].m_table[len] = m; - len++; - - /* enough pkts to be sent */ - if (unlikely(len == MAX_PKT_BURST)) { - l2fwd_send_burst(qconf, MAX_PKT_BURST, port); - len = 0; - } - - qconf->tx_mbufs[port].len = len; - return 0; -} - static void l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) { struct ether_hdr *eth; void *tmp; + int sent; unsigned dst_port; dst_port = l2fwd_dst_ports[portid]; @@ -255,7 +204,9 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) /* src addr */ ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], ð->s_addr); - l2fwd_send_packet(m, (uint8_t) dst_port); + sent = rte_eth_tx_buffer(dst_port, 0, m); + if (sent) + port_statistics[dst_port].tx += sent; } /* main processing loop */ @@ -265,6 +216,7 @@ l2fwd_main_loop(void) struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; struct rte_mbuf *m; unsigned lcore_id; + int sent; uint64_t prev_tsc, diff_tsc, cur_tsc; unsigned i, j, portid, nb_rx; struct lcore_queue_conf *qconf; @@ -312,13 +264,12 @@ l2fwd_main_loop(void) diff_tsc = cur_tsc - prev_tsc; if (unlikely(diff_tsc > drain_tsc)) { - for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { - if (qconf->tx_mbufs[portid].len == 0) - continue; - l2fwd_send_burst(&lcore_queue_conf[lcore_id], - qconf->tx_mbufs[portid].len, - (uint8_t) portid); - qconf->tx_mbufs[portid].len = 0; + for (i = 0; i < qconf->n_rx_port; i++) { + portid = qconf->rx_port_list[i]; + portid = l2fwd_dst_ports[portid]; + sent = rte_eth_tx_buffer_flush(portid, 0); + if (sent) + port_statistics[portid].tx += sent; } prev_tsc = cur_tsc; @@ -713,6 +664,10 @@ main(int argc, char **argv) "rte_eth_tx_queue_setup:err=%d, port=%u\n", ret, (unsigned) portid); + rte_eth_tx_buffer_set_err_callback(portid, 0, 
+ rte_eth_count_unsent_packet_callback, + &port_statistics[portid].dropped); + /* Start device */ ret = rte_eth_dev_start(portid); if (ret < 0) diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c index 720fd5a..e6dce27 100644 --- a/examples/l2fwd/main.c +++ b/examples/l2fwd/main.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -95,17 +95,11 @@ static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS]; static unsigned int l2fwd_rx_queue_per_lcore = 1; -struct mbuf_table { - unsigned len; - struct rte_mbuf *m_table[MAX_PKT_BURST]; -}; - #define MAX_RX_QUEUE_PER_LCORE 16 #define MAX_TX_QUEUE_PER_PORT 16 struct lcore_queue_conf { unsigned n_rx_port; unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE]; - struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; } __rte_cache_aligned; struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; @@ -130,7 +124,7 @@ struct rte_mempool * l2fwd_pktmbuf_pool = NULL; struct l2fwd_port_statistics { uint64_t tx; uint64_t rx; - uint64_t dropped; + unsigned long dropped; } __rte_cache_aligned; struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS]; @@ -185,57 +179,12 @@ print_stats(void) printf("\n====================================================\n"); } -/* Send the burst of packets on an output interface */ -static int -l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port) -{ - struct rte_mbuf **m_table; - unsigned ret; - unsigned queueid =0; - - m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; - - ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n); - port_statistics[port].tx += ret; - if (unlikely(ret < n)) { - port_statistics[port].dropped += (n - ret); - do { - rte_pktmbuf_free(m_table[ret]); - } while (++ret < n); - } - - return 0; -} - -/* Enqueue packets for TX and prepare them to be sent */ -static int -l2fwd_send_packet(struct rte_mbuf *m, uint8_t port) -{ - unsigned lcore_id, len; - struct lcore_queue_conf *qconf; - - lcore_id = rte_lcore_id(); - - qconf = &lcore_queue_conf[lcore_id]; - len = qconf->tx_mbufs[port].len; - qconf->tx_mbufs[port].m_table[len] = m; - len++; - - /* enough pkts to be sent */ - if (unlikely(len == MAX_PKT_BURST)) { - l2fwd_send_burst(qconf, MAX_PKT_BURST, port); - len = 0; - } - - qconf->tx_mbufs[port].len = len; - return 0; -} - static void l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) { struct ether_hdr *eth; void *tmp; + int sent; unsigned dst_port; dst_port = l2fwd_dst_ports[portid]; @@ -248,7 +197,9 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) /* src addr */ ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], ð->s_addr); - l2fwd_send_packet(m, (uint8_t) dst_port); + sent = rte_eth_tx_buffer(dst_port, 0, m); + if (sent) + port_statistics[dst_port].tx += sent; } /* main processing loop */ @@ -258,6 +209,7 @@ l2fwd_main_loop(void) struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; struct rte_mbuf *m; unsigned lcore_id; + int sent; uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc; unsigned i, j, portid, nb_rx; struct lcore_queue_conf *qconf; @@ -277,7 +229,6 @@ l2fwd_main_loop(void) RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id); for (i = 0; i < qconf->n_rx_port; i++) { - portid = qconf->rx_port_list[i]; RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id, portid); @@ -293,13 +244,12 @@ l2fwd_main_loop(void) diff_tsc 
= cur_tsc - prev_tsc; if (unlikely(diff_tsc > drain_tsc)) { - for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { - if (qconf->tx_mbufs[portid].len == 0) - continue; - l2fwd_send_burst(&lcore_queue_conf[lcore_id], - qconf->tx_mbufs[portid].len, - (uint8_t) portid); - qconf->tx_mbufs[portid].len = 0; + for (i = 0; i < qconf->n_rx_port; i++) { + portid = qconf->rx_port_list[i]; + portid = l2fwd_dst_ports[portid]; + sent = rte_eth_tx_buffer_flush(portid, 0); + if (sent) + port_statistics[portid].tx += sent; } /* if timer is enabled */ @@ -666,6 +616,10 @@ main(int argc, char **argv) rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n", ret, (unsigned) portid); + rte_eth_tx_buffer_set_err_callback(portid, 0, + rte_eth_count_unsent_packet_callback, + &port_statistics[portid].dropped); + /* Start device */ ret = rte_eth_dev_start(portid); if (ret < 0) diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c index f676d14..810cdac 100644 --- a/examples/l3fwd-acl/main.c +++ b/examples/l3fwd-acl/main.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -119,11 +119,6 @@ static uint32_t enabled_port_mask; static int promiscuous_on; /**< Ports set in promiscuous mode off by default. */ static int numa_on = 1; /**< NUMA is enabled by default. */ -struct mbuf_table { - uint16_t len; - struct rte_mbuf *m_table[MAX_PKT_BURST]; -}; - struct lcore_rx_queue { uint8_t port_id; uint8_t queue_id; @@ -187,7 +182,7 @@ static struct rte_mempool *pktmbuf_pool[NB_SOCKETS]; static inline int is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len); #endif -static inline int +static inline void send_single_packet(struct rte_mbuf *m, uint8_t port); #define MAX_ACL_RULE_NUM 100000 @@ -1292,55 +1287,17 @@ struct lcore_conf { uint16_t n_rx_queue; struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; - struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; } __rte_cache_aligned; static struct lcore_conf lcore_conf[RTE_MAX_LCORE]; -/* Send burst of packets on an output interface */ -static inline int -send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port) -{ - struct rte_mbuf **m_table; - int ret; - uint16_t queueid; - - queueid = qconf->tx_queue_id[port]; - m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; - - ret = rte_eth_tx_burst(port, queueid, m_table, n); - if (unlikely(ret < n)) { - do { - rte_pktmbuf_free(m_table[ret]); - } while (++ret < n); - } - - return 0; -} - /* Enqueue a single packet, and send burst if queue is filled */ -static inline int +static inline void send_single_packet(struct rte_mbuf *m, uint8_t port) { - uint32_t lcore_id; - uint16_t len; - struct lcore_conf *qconf; - - lcore_id = rte_lcore_id(); - - qconf = &lcore_conf[lcore_id]; - len = qconf->tx_mbufs[port].len; - qconf->tx_mbufs[port].m_table[len] = m; - len++; - - /* enough pkts to be sent */ - if (unlikely(len == MAX_PKT_BURST)) { - send_burst(qconf, MAX_PKT_BURST, port); - len = 0; - } + uint16_t q = lcore_conf[rte_lcore_id()].tx_queue_id[port]; - qconf->tx_mbufs[port].len = len; - return 0; + rte_eth_tx_buffer(port, q, m); } #ifdef DO_RFC_1812_CHECKS @@ -1433,14 +1390,9 @@ main_loop(__attribute__((unused)) void *dummy) * This could be optimized (use queueid instead of * portid), but it is not called so often */ - for (portid = 0; portid < 
RTE_MAX_ETHPORTS; portid++) { - if (qconf->tx_mbufs[portid].len == 0) - continue; - send_burst(&lcore_conf[lcore_id], - qconf->tx_mbufs[portid].len, - portid); - qconf->tx_mbufs[portid].len = 0; - } + for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) + rte_eth_tx_buffer_flush(portid, + qconf->tx_queue_id[portid]); prev_tsc = cur_tsc; } diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c index 828c18a..6f32242 100644 --- a/examples/l3fwd-power/main.c +++ b/examples/l3fwd-power/main.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -173,11 +173,6 @@ enum freq_scale_hint_t FREQ_HIGHEST = 2 }; -struct mbuf_table { - uint16_t len; - struct rte_mbuf *m_table[MAX_PKT_BURST]; -}; - struct lcore_rx_queue { uint8_t port_id; uint8_t queue_id; @@ -348,7 +343,6 @@ struct lcore_conf { uint16_t n_rx_queue; struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; - struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; lookup_struct_t * ipv4_lookup_struct; lookup_struct_t * ipv6_lookup_struct; } __rte_cache_aligned; @@ -442,50 +436,12 @@ power_timer_cb(__attribute__((unused)) struct rte_timer *tim, stats[lcore_id].sleep_time = 0; } -/* Send burst of packets on an output interface */ -static inline int -send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port) -{ - struct rte_mbuf **m_table; - int ret; - uint16_t queueid; - - queueid = qconf->tx_queue_id[port]; - m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; - - ret = rte_eth_tx_burst(port, queueid, m_table, n); - if (unlikely(ret < n)) { - do { - rte_pktmbuf_free(m_table[ret]); - } while (++ret < n); - } - - return 0; -} - -/* Enqueue a single packet, and send burst if queue is filled */ -static inline int +static inline void send_single_packet(struct rte_mbuf *m, uint8_t port) { - uint32_t lcore_id; - uint16_t len; - struct lcore_conf *qconf; - - lcore_id = rte_lcore_id(); + uint16_t q = lcore_conf[rte_lcore_id()].tx_queue_id[port]; - qconf = &lcore_conf[lcore_id]; - len = qconf->tx_mbufs[port].len; - qconf->tx_mbufs[port].m_table[len] = m; - len++; - - /* enough pkts to be sent */ - if (unlikely(len == MAX_PKT_BURST)) { - send_burst(qconf, MAX_PKT_BURST, port); - len = 0; - } - - qconf->tx_mbufs[port].len = len; - return 0; + rte_eth_tx_buffer(port, q, m); } #ifdef DO_RFC_1812_CHECKS @@ -910,14 +866,9 @@ main_loop(__attribute__((unused)) void *dummy) * This could be optimized (use queueid instead of * portid), but it is not called so often */ - for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { - if (qconf->tx_mbufs[portid].len == 0) - continue; - send_burst(&lcore_conf[lcore_id], - qconf->tx_mbufs[portid].len, - portid); - qconf->tx_mbufs[portid].len = 0; - } + for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) + rte_eth_tx_buffer_flush(portid, + qconf->tx_queue_id[portid]); prev_tsc = cur_tsc; } diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c index c57a08a..ec51cbe 100644 --- a/examples/link_status_interrupt/main.c +++ b/examples/link_status_interrupt/main.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -96,19 +96,12 @@ static unsigned int lsi_rx_queue_per_lcore = 1; /* destination port for L2 forwarding */ static unsigned lsi_dst_ports[RTE_MAX_ETHPORTS] = {0}; -#define MAX_PKT_BURST 32 -struct mbuf_table { - unsigned len; - struct rte_mbuf *m_table[MAX_PKT_BURST]; -}; - #define MAX_RX_QUEUE_PER_LCORE 16 #define MAX_TX_QUEUE_PER_PORT 16 struct lcore_queue_conf { unsigned n_rx_port; unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE]; unsigned tx_queue_id; - struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; } __rte_cache_aligned; struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; @@ -136,7 +129,7 @@ struct rte_mempool * lsi_pktmbuf_pool = NULL; struct lsi_port_statistics { uint64_t tx; uint64_t rx; - uint64_t dropped; + unsigned long dropped; } __rte_cache_aligned; struct lsi_port_statistics port_statistics[RTE_MAX_ETHPORTS]; @@ -202,58 +195,12 @@ print_stats(void) printf("\n====================================================\n"); } -/* Send the packet on an output interface */ -static int -lsi_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port) -{ - struct rte_mbuf **m_table; - unsigned ret; - unsigned queueid; - - queueid = (uint16_t) qconf->tx_queue_id; - m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; - - ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n); - port_statistics[port].tx += ret; - if (unlikely(ret < n)) { - port_statistics[port].dropped += (n - ret); - do { - rte_pktmbuf_free(m_table[ret]); - } while (++ret < n); - } - - return 0; -} - -/* Send the packet on an output interface */ -static int -lsi_send_packet(struct rte_mbuf *m, uint8_t port) -{ - unsigned lcore_id, len; - struct lcore_queue_conf *qconf; - - lcore_id = rte_lcore_id(); - - qconf = &lcore_queue_conf[lcore_id]; - len = qconf->tx_mbufs[port].len; - qconf->tx_mbufs[port].m_table[len] = m; - len++; - - /* enough pkts to be sent */ - if (unlikely(len == MAX_PKT_BURST)) { - lsi_send_burst(qconf, MAX_PKT_BURST, port); - len = 0; - } - - qconf->tx_mbufs[port].len = len; - return 0; -} - static void lsi_simple_forward(struct rte_mbuf *m, unsigned portid) { struct ether_hdr *eth; void *tmp; + unsigned sent; unsigned dst_port = lsi_dst_ports[portid]; eth = rte_pktmbuf_mtod(m, struct ether_hdr *); @@ -265,7 +212,9 @@ lsi_simple_forward(struct rte_mbuf *m, unsigned portid) /* src addr */ ether_addr_copy(&lsi_ports_eth_addr[dst_port], ð->s_addr); - lsi_send_packet(m, (uint8_t) dst_port); + sent = rte_eth_tx_buffer(dst_port, 0, m); + if (sent) + port_statistics[dst_port].tx += sent; } /* main processing loop */ @@ -275,6 +224,7 @@ lsi_main_loop(void) struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; struct rte_mbuf *m; unsigned lcore_id; + unsigned sent; uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc; unsigned i, j, portid, nb_rx; struct lcore_queue_conf *qconf; @@ -310,15 +260,12 @@ lsi_main_loop(void) diff_tsc = cur_tsc - prev_tsc; if (unlikely(diff_tsc > drain_tsc)) { - /* this could be optimized (use queueid instead of - * portid), but it is not called so often */ - for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { - if (qconf->tx_mbufs[portid].len == 0) - continue; - lsi_send_burst(&lcore_queue_conf[lcore_id], - qconf->tx_mbufs[portid].len, - (uint8_t) portid); - qconf->tx_mbufs[portid].len = 0; + for (i = 0; i < qconf->n_rx_port; i++) { + portid = qconf->rx_port_list[i]; + portid = lsi_dst_ports[portid]; + sent = rte_eth_tx_buffer_flush(portid, 0); + if (sent) + port_statistics[portid].tx += sent; 
} /* if timer is enabled */ @@ -700,6 +647,10 @@ main(int argc, char **argv) rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_INTR_LSC, lsi_event_callback, NULL); + rte_eth_tx_buffer_set_err_callback(portid, 0, + rte_eth_count_unsent_packet_callback, + &port_statistics[portid].dropped); + rte_eth_macaddr_get(portid, &lsi_ports_eth_addr[portid]); diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c index bf049a4..2321550 100644 --- a/examples/multi_process/client_server_mp/mp_client/client.c +++ b/examples/multi_process/client_server_mp/mp_client/client.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -72,22 +72,12 @@ * queue to write to. */ static uint8_t client_id = 0; -struct mbuf_queue { -#define MBQ_CAPACITY 32 - struct rte_mbuf *bufs[MBQ_CAPACITY]; - uint16_t top; -}; - /* maps input ports to output ports for packets */ static uint8_t output_ports[RTE_MAX_ETHPORTS]; -/* buffers up a set of packet that are ready to send */ -static struct mbuf_queue output_bufs[RTE_MAX_ETHPORTS]; - /* shared data from server. We update statistics here */ static volatile struct tx_stats *tx_stats; - /* * print a usage message */ @@ -149,6 +139,23 @@ parse_app_args(int argc, char *argv[]) } /* + * Tx buffer error callback + */ +static void +flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count, + void *userdata) { + int i; + uint8_t port = (uintptr_t)userdata; + + tx_stats->tx_drop[port] += count; + + /* free the mbufs which failed from transmit */ + for (i = 0; i < count; i++) + rte_pktmbuf_free(unsent[i]); + +} + +/* * set up output ports so that all traffic on port gets sent out * its paired port. Index using actual port numbers since that is * what comes in the mbuf structure. @@ -164,41 +171,14 @@ static void configure_output_ports(const struct port_info *ports) uint8_t p2 = ports->id[i+1]; output_ports[p1] = p2; output_ports[p2] = p1; - } -} - -static inline void -send_packets(uint8_t port) -{ - uint16_t i, sent; - struct mbuf_queue *mbq = &output_bufs[port]; + rte_eth_tx_buffer_set_err_callback(p1, client_id, + flush_tx_error_callback, (void *)(intptr_t)p1); - if (unlikely(mbq->top == 0)) - return; + rte_eth_tx_buffer_set_err_callback(p2, client_id, + flush_tx_error_callback, (void *)(intptr_t)p2); - sent = rte_eth_tx_burst(port, client_id, mbq->bufs, mbq->top); - if (unlikely(sent < mbq->top)){ - for (i = sent; i < mbq->top; i++) - rte_pktmbuf_free(mbq->bufs[i]); - tx_stats->tx_drop[port] += (mbq->top - sent); } - tx_stats->tx[port] += sent; - mbq->top = 0; -} - -/* - * Enqueue a packet to be sent on a particular port, but - * don't send it yet. Only when the buffer is full. 
- */ -static inline void -enqueue_packet(struct rte_mbuf *buf, uint8_t port) -{ - struct mbuf_queue *mbq = &output_bufs[port]; - mbq->bufs[mbq->top++] = buf; - - if (mbq->top == MBQ_CAPACITY) - send_packets(port); } /* @@ -209,10 +189,13 @@ enqueue_packet(struct rte_mbuf *buf, uint8_t port) static void handle_packet(struct rte_mbuf *buf) { + unsigned sent; const uint8_t in_port = buf->port; const uint8_t out_port = output_ports[in_port]; - enqueue_packet(buf, out_port); + sent = rte_eth_tx_buffer(out_port, client_id, buf); + if (unlikely(sent)) + tx_stats->tx[out_port] += sent; } /* @@ -229,6 +212,7 @@ main(int argc, char *argv[]) int need_flush = 0; /* indicates whether we have unsent packets */ int retval; void *pkts[PKT_READ_SIZE]; + uint16_t sent; if ((retval = rte_eal_init(argc, argv)) < 0) return -1; @@ -274,8 +258,11 @@ main(int argc, char *argv[]) if (unlikely(rx_pkts == 0)){ if (need_flush) - for (port = 0; port < ports->num_ports; port++) - send_packets(ports->id[port]); + for (port = 0; port < ports->num_ports; port++) { + sent = rte_eth_tx_buffer_flush(ports->id[port], client_id); + if (unlikely(sent)) + tx_stats->tx[port] += sent; + } need_flush = 0; continue; } diff --git a/examples/multi_process/l2fwd_fork/main.c b/examples/multi_process/l2fwd_fork/main.c index f2d7eab..f919e07 100644 --- a/examples/multi_process/l2fwd_fork/main.c +++ b/examples/multi_process/l2fwd_fork/main.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -117,18 +117,11 @@ static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS]; static unsigned int l2fwd_rx_queue_per_lcore = 1; -struct mbuf_table { - unsigned len; - struct rte_mbuf *m_table[MAX_PKT_BURST]; -}; - #define MAX_RX_QUEUE_PER_LCORE 16 #define MAX_TX_QUEUE_PER_PORT 16 struct lcore_queue_conf { unsigned n_rx_port; unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE]; - struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; - } __rte_cache_aligned; struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; @@ -176,7 +169,7 @@ static struct rte_mempool * l2fwd_pktmbuf_pool[RTE_MAX_ETHPORTS]; struct l2fwd_port_statistics { uint64_t tx; uint64_t rx; - uint64_t dropped; + unsigned long dropped; } __rte_cache_aligned; struct l2fwd_port_statistics *port_statistics; /** @@ -583,57 +576,12 @@ slave_exit_cb(unsigned slaveid, __attribute__((unused))int stat) rte_spinlock_unlock(&res_lock); } -/* Send the packet on an output interface */ -static int -l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port) -{ - struct rte_mbuf **m_table; - unsigned ret; - unsigned queueid =0; - - m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; - - ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n); - port_statistics[port].tx += ret; - if (unlikely(ret < n)) { - port_statistics[port].dropped += (n - ret); - do { - rte_pktmbuf_free(m_table[ret]); - } while (++ret < n); - } - - return 0; -} - -/* Send the packet on an output interface */ -static int -l2fwd_send_packet(struct rte_mbuf *m, uint8_t port) -{ - unsigned lcore_id, len; - struct lcore_queue_conf *qconf; - - lcore_id = rte_lcore_id(); - - qconf = &lcore_queue_conf[lcore_id]; - len = qconf->tx_mbufs[port].len; - qconf->tx_mbufs[port].m_table[len] = m; - len++; - - /* enough pkts to be sent */ - if (unlikely(len == MAX_PKT_BURST)) { - l2fwd_send_burst(qconf, MAX_PKT_BURST, 
port); - len = 0; - } - - qconf->tx_mbufs[port].len = len; - return 0; -} - static void l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) { struct ether_hdr *eth; void *tmp; + unsigned sent; unsigned dst_port; dst_port = l2fwd_dst_ports[portid]; @@ -646,7 +594,9 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) /* src addr */ ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], ð->s_addr); - l2fwd_send_packet(m, (uint8_t) dst_port); + sent = rte_eth_tx_buffer(dst_port, 0, m); + if (sent) + port_statistics[dst_port].tx += sent; } /* main processing loop */ @@ -656,6 +606,7 @@ l2fwd_main_loop(void) struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; struct rte_mbuf *m; unsigned lcore_id; + unsigned sent; uint64_t prev_tsc, diff_tsc, cur_tsc; unsigned i, j, portid, nb_rx; struct lcore_queue_conf *qconf; @@ -698,14 +649,12 @@ l2fwd_main_loop(void) */ diff_tsc = cur_tsc - prev_tsc; if (unlikely(diff_tsc > drain_tsc)) { - - for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { - if (qconf->tx_mbufs[portid].len == 0) - continue; - l2fwd_send_burst(&lcore_queue_conf[lcore_id], - qconf->tx_mbufs[portid].len, - (uint8_t) portid); - qconf->tx_mbufs[portid].len = 0; + for (i = 0; i < qconf->n_rx_port; i++) { + portid = qconf->rx_port_list[i]; + portid = l2fwd_dst_ports[portid]; + sent = rte_eth_tx_buffer_flush(portid, 0); + if (sent) + port_statistics[portid].tx += sent; } } @@ -1144,6 +1093,10 @@ main(int argc, char **argv) rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n", ret, (unsigned) portid); + rte_eth_tx_buffer_set_err_callback(portid, 0, + rte_eth_count_unsent_packet_callback, + &port_statistics[portid].dropped); + /* Start device */ ret = rte_eth_dev_start(portid); if (ret < 0) diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c index 1d9a86f..a11d68e 100644 --- a/examples/packet_ordering/main.c +++ b/examples/packet_ordering/main.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -86,11 +86,6 @@ struct send_thread_args { struct rte_reorder_buffer *buffer; }; -struct output_buffer { - unsigned count; - struct rte_mbuf *mbufs[MAX_PKTS_BURST]; -}; - volatile struct app_stats { struct { uint64_t rx_pkts; @@ -235,6 +230,20 @@ parse_args(int argc, char **argv) return 0; } +/* + * Tx buffer error callback + */ +static void +flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count, + void *userdata __rte_unused) { + + /* free the mbufs which failed from transmit */ + app_stats.tx.ro_tx_failed_pkts += count; + LOG_DEBUG(REORDERAPP, "%s:Packet loss with tx_burst\n", __func__); + pktmbuf_free_bulk(unsent, count); + +} + static inline int configure_eth_port(uint8_t port_id) { @@ -266,6 +275,9 @@ configure_eth_port(uint8_t port_id) return ret; } + rte_eth_tx_buffer_set_err_callback(port_id, 0, flush_tx_error_callback, + NULL); + ret = rte_eth_dev_start(port_id); if (ret < 0) return ret; @@ -438,22 +450,6 @@ worker_thread(void *args_ptr) return 0; } -static inline void -flush_one_port(struct output_buffer *outbuf, uint8_t outp) -{ - unsigned nb_tx = rte_eth_tx_burst(outp, 0, outbuf->mbufs, - outbuf->count); - app_stats.tx.ro_tx_pkts += nb_tx; - - if (unlikely(nb_tx < outbuf->count)) { - /* free the mbufs which failed from transmit */ - app_stats.tx.ro_tx_failed_pkts += (outbuf->count - nb_tx); - LOG_DEBUG(REORDERAPP, "%s:Packet loss with tx_burst\n", __func__); - pktmbuf_free_bulk(&outbuf->mbufs[nb_tx], outbuf->count - nb_tx); - } - outbuf->count = 0; -} - /** * Dequeue mbufs from the workers_to_tx ring and reorder them before * transmitting. @@ -464,8 +460,8 @@ send_thread(struct send_thread_args *args) int ret; unsigned int i, dret; uint16_t nb_dq_mbufs; + uint16_t sent; uint8_t outp; - static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS]; struct rte_mbuf *mbufs[MAX_PKTS_BURST]; struct rte_mbuf *rombufs[MAX_PKTS_BURST] = {NULL}; @@ -515,7 +511,6 @@ send_thread(struct send_thread_args *args) dret = rte_reorder_drain(args->buffer, rombufs, MAX_PKTS_BURST); for (i = 0; i < dret; i++) { - struct output_buffer *outbuf; uint8_t outp1; outp1 = rombufs[i]->port; @@ -525,10 +520,10 @@ send_thread(struct send_thread_args *args) continue; } - outbuf = &tx_buffers[outp1]; - outbuf->mbufs[outbuf->count++] = rombufs[i]; - if (outbuf->count == MAX_PKTS_BURST) - flush_one_port(outbuf, outp1); + sent = rte_eth_tx_buffer(outp1, 0, rombufs[i]); + if (sent) + app_stats.tx.ro_tx_pkts += sent; + } } return 0; @@ -541,10 +536,9 @@ static int tx_thread(struct rte_ring *ring_in) { uint32_t i, dqnum; + uint16_t sent; uint8_t outp; - static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS]; struct rte_mbuf *mbufs[MAX_PKTS_BURST]; - struct output_buffer *outbuf; RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__, rte_lcore_id()); @@ -567,10 +561,10 @@ tx_thread(struct rte_ring *ring_in) continue; } - outbuf = &tx_buffers[outp]; - outbuf->mbufs[outbuf->count++] = mbufs[i]; - if (outbuf->count == MAX_PKTS_BURST) - flush_one_port(outbuf, outp); + sent = rte_eth_tx_buffer(outp, 0, mbufs[i]); + if (sent) + app_stats.tx.ro_tx_pkts += sent; + } } diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c index 0de5e7f..7d901d2 100644 --- a/examples/qos_meter/main.c +++ b/examples/qos_meter/main.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -118,8 +118,6 @@ static struct rte_eth_conf port_conf = { static uint8_t port_rx; static uint8_t port_tx; static struct rte_mbuf *pkts_rx[PKT_RX_BURST_MAX]; -static struct rte_mbuf *pkts_tx[PKT_TX_BURST_MAX]; -static uint16_t pkts_tx_len = 0; struct rte_meter_srtcm_params app_srtcm_params[] = { @@ -188,26 +186,9 @@ main_loop(__attribute__((unused)) void *dummy) current_time = rte_rdtsc(); time_diff = current_time - last_time; if (unlikely(time_diff > TIME_TX_DRAIN)) { - int ret; - if (pkts_tx_len == 0) { - last_time = current_time; - - continue; - } - - /* Write packet burst to NIC TX */ - ret = rte_eth_tx_burst(port_tx, NIC_TX_QUEUE, pkts_tx, pkts_tx_len); - - /* Free buffers for any packets not written successfully */ - if (unlikely(ret < pkts_tx_len)) { - for ( ; ret < pkts_tx_len; ret ++) { - rte_pktmbuf_free(pkts_tx[ret]); - } - } - - /* Empty the output buffer */ - pkts_tx_len = 0; + /* Flush tx buffer */ + rte_eth_tx_buffer_flush(port_tx, NIC_TX_QUEUE); last_time = current_time; } @@ -222,26 +203,9 @@ main_loop(__attribute__((unused)) void *dummy) /* Handle current packet */ if (app_pkt_handle(pkt, current_time) == DROP) rte_pktmbuf_free(pkt); - else { - pkts_tx[pkts_tx_len] = pkt; - pkts_tx_len ++; - } - - /* Write packets from output buffer to NIC TX when full burst is available */ - if (unlikely(pkts_tx_len == PKT_TX_BURST_MAX)) { - /* Write packet burst to NIC TX */ - int ret = rte_eth_tx_burst(port_tx, NIC_TX_QUEUE, pkts_tx, PKT_TX_BURST_MAX); + else + rte_eth_tx_buffer(port_tx, NIC_TX_QUEUE, pkt); - /* Free buffers for any packets not written successfully */ - if (unlikely(ret < PKT_TX_BURST_MAX)) { - for ( ; ret < PKT_TX_BURST_MAX; ret ++) { - rte_pktmbuf_free(pkts_tx[ret]); - } - } - - /* Empty the output buffer */ - pkts_tx_len = 0; - } } } } -- 1.7.9.5
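
Not part of the patch, but for reference: a minimal sketch of the common TX buffering pattern the reworked examples above share, written against the rte_eth_tx_buffer* calls exactly as this series uses them. It assumes the API added by patch 1/2 is available via rte_ethdev.h; the helper names, port/queue ids and counters below are illustrative placeholders, not taken from any one example.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint64_t tx_ok;            /* packets confirmed sent */
static unsigned long tx_dropped;  /* updated by the unsent-packet callback */

/* At port setup: count (and free) packets the buffer fails to transmit,
 * as the l2fwd variants above do with their per-port "dropped" counter. */
static void
setup_tx_buffering(uint8_t port, uint16_t queue)
{
	rte_eth_tx_buffer_set_err_callback(port, queue,
			rte_eth_count_unsent_packet_callback, &tx_dropped);
}

/* Per packet on the fast path: buffer it; a full burst is transmitted
 * automatically and the number of packets actually sent is returned. */
static void
forward_packet(struct rte_mbuf *m, uint8_t port, uint16_t queue)
{
	int sent = rte_eth_tx_buffer(port, queue, m);

	if (sent)
		tx_ok += sent;
}

/* On the periodic drain: push out any partially filled burst. */
static void
drain_tx(uint8_t port, uint16_t queue)
{
	int sent = rte_eth_tx_buffer_flush(port, queue);

	if (sent)
		tx_ok += sent;
}

/* Alternative error handling, as in mp_client and packet_ordering above:
 * a custom callback receives the unsent mbufs and decides what to do. */
static void
count_and_free_unsent(struct rte_mbuf **unsent, uint16_t count, void *userdata)
{
	unsigned long *drops = userdata;
	uint16_t i;

	*drops += count;
	for (i = 0; i < count; i++)
		rte_pktmbuf_free(unsent[i]);
}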