From: Cunming Liang <cunming.liang@intel.com>
To: dev@dpdk.org
Cc: shemming@brocade.com, liang-min.wang@intel.com
Date: Tue, 2 Jun 2015 14:53:25 +0800
Message-Id: <1433228006-24661-13-git-send-email-cunming.liang@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1433228006-24661-1-git-send-email-cunming.liang@intel.com>
References: <1432889125-20255-1-git-send-email-cunming.liang@intel.com>
 <1433228006-24661-1-git-send-email-cunming.liang@intel.com>
Subject: [dpdk-dev] [PATCH v10 12/13] l3fwd-power: enable one-shot rx interrupt and polling/interrupt mode switch

Demonstrate how to handle per-rx-queue interrupts in a NAPI-like
implementation in userspace. The DPDK polling thread works mainly in
polling mode and switches to interrupt mode only if no packets have
been received over several recent polls. Userspace interrupt
notification generally costs many more cycles than in the kernel, so a
one-shot interrupt is used here to guarantee minimum overhead, and the
DPDK polling thread returns to polling mode immediately once it
receives an interrupt notification for an incoming packet.
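The core of the change, condensed into a standalone sketch (illustrative
only: napi_like_loop(), handle_pkts(), MAX_BURST and SUSPEND_POLLS are
names made up for this sketch, not code from the patch, and the queue is
assumed to have been registered with rte_eth_dev_rx_intr_ctl_q()
beforehand, as event_register() in the diff below does):

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_mbuf.h>

#define MAX_BURST     32   /* illustrative burst size */
#define SUSPEND_POLLS 300  /* empty polls tolerated before suspending */

static void
napi_like_loop(uint8_t port, uint16_t queue,
	       void (*handle_pkts)(struct rte_mbuf **, uint16_t))
{
	struct rte_mbuf *pkts[MAX_BURST];
	struct rte_epoll_event ev;
	uint32_t idle = 0;

	for (;;) {
		uint16_t nb_rx = rte_eth_rx_burst(port, queue,
						  pkts, MAX_BURST);

		if (nb_rx > 0) {
			/* traffic present: stay in polling mode */
			idle = 0;
			handle_pkts(pkts, nb_rx);
		} else if (++idle > SUSPEND_POLLS) {
			/* quiet: arm the one-shot interrupt, then block
			 * until the NIC signals an incoming packet */
			rte_eth_dev_rx_intr_enable(port, queue);
			rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
			idle = 0;	/* woken up: back to polling */
		}
	}
}

Because the interrupt is one-shot, no explicit disable is needed on
wakeup; the next rte_eth_dev_rx_intr_enable() call re-arms it before the
following suspend, which is exactly what turn_on_intr() does below.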
Signed-off-by: Danny Zhou <danny.zhou@intel.com>
Signed-off-by: Cunming Liang <cunming.liang@intel.com>
---
v7 changes
 - use the new APIs
 - demo multiple port/queue pairs waiting on the same epoll instance

v6 changes
 - split event fd add and wait

v5 changes
 - change the invoked function name and parameters to accommodate the
   EAL change

v3 changes
 - add a spinlock to ensure thread safety when accessing the interrupt
   mask register

v2 changes
 - remove an unused function that existed for debug purposes

 examples/l3fwd-power/main.c | 207 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 165 insertions(+), 42 deletions(-)

diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 6ac342b..538bb93 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -74,12 +74,14 @@
 #include
 #include
 #include
+#include
+#include
 
 #define RTE_LOGTYPE_L3FWD_POWER RTE_LOGTYPE_USER1
 
 #define MAX_PKT_BURST 32
 
-#define MIN_ZERO_POLL_COUNT 5
+#define MIN_ZERO_POLL_COUNT 10
 
 /* around 100ms at 2 Ghz */
 #define TIMER_RESOLUTION_CYCLES 200000000ULL
@@ -153,6 +155,9 @@ static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
 /* ethernet addresses of ports */
 static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
 
+/* per-port spinlocks serializing interrupt mask register access */
+static rte_spinlock_t locks[RTE_MAX_ETHPORTS];
+
 /* mask of enabled ports */
 static uint32_t enabled_port_mask = 0;
 /* Ports set in promiscuous mode off by default. */
@@ -185,6 +190,9 @@ struct lcore_rx_queue {
 
 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
 #define MAX_RX_QUEUE_PER_PORT 128
+#define MAX_RX_QUEUE_INTERRUPT_PER_PORT 16
+
+
 #define MAX_LCORE_PARAMS 1024
 struct lcore_params {
 	uint8_t port_id;
@@ -211,7 +219,7 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode        = ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 		.header_split   = 0, /**< Header Split disabled */
@@ -223,11 +231,15 @@ static struct rte_eth_conf port_conf = {
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = ETH_RSS_UDP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = ETH_MQ_TX_NONE,
+	},
+	.intr_conf = {
+		.lsc = 1,
+		.rxq = 1, /**< rxq interrupt feature enabled */
 	},
 };
@@ -399,19 +411,22 @@ power_timer_cb(__attribute__((unused)) struct rte_timer *tim,
 	/* accumulate total execution time in us when callback is invoked */
 	sleep_time_ratio = (float)(stats[lcore_id].sleep_time) /
 					(float)SCALING_PERIOD;
-
 	/**
 	 * check whether need to scale down frequency a step if it sleep a lot.
 	 */
-	if (sleep_time_ratio >= SCALING_DOWN_TIME_RATIO_THRESHOLD)
-		rte_power_freq_down(lcore_id);
+	if (sleep_time_ratio >= SCALING_DOWN_TIME_RATIO_THRESHOLD) {
+		if (rte_power_freq_down)
+			rte_power_freq_down(lcore_id);
+	}
 	else if ( (unsigned)(stats[lcore_id].nb_rx_processed /
-		stats[lcore_id].nb_iteration_looped) < MAX_PKT_BURST)
+		stats[lcore_id].nb_iteration_looped) < MAX_PKT_BURST) {
 		/**
 		 * scale down a step if average packet per iteration less
 		 * than expectation.
 		 */
-		rte_power_freq_down(lcore_id);
+		if (rte_power_freq_down)
+			rte_power_freq_down(lcore_id);
+	}
 
 	/**
 	 * initialize another timer according to current frequency to ensure
@@ -704,22 +719,20 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
 
 }
 
-#define SLEEP_GEAR1_THRESHOLD  100
-#define SLEEP_GEAR2_THRESHOLD  1000
+#define MINIMUM_SLEEP_TIME         1
+#define SUSPEND_THRESHOLD          300
 
 static inline uint32_t
 power_idle_heuristic(uint32_t zero_rx_packet_count)
 {
-	/* If zero count is less than 100, use it as the sleep time in us */
-	if (zero_rx_packet_count < SLEEP_GEAR1_THRESHOLD)
-		return zero_rx_packet_count;
-	/* If zero count is less than 1000, sleep time should be 100 us */
-	else if ((zero_rx_packet_count >= SLEEP_GEAR1_THRESHOLD) &&
-			(zero_rx_packet_count < SLEEP_GEAR2_THRESHOLD))
-		return SLEEP_GEAR1_THRESHOLD;
-	/* If zero count is greater than 1000, sleep time should be 1000 us */
-	else if (zero_rx_packet_count >= SLEEP_GEAR2_THRESHOLD)
-		return SLEEP_GEAR2_THRESHOLD;
+	/* If zero count is below the suspend threshold, sleep 1 us */
+	if (zero_rx_packet_count < SUSPEND_THRESHOLD)
+		return MINIMUM_SLEEP_TIME;
+	/* Otherwise request a long sleep; 300 us roughly covers the
+	   minimum latency of switching from C3/C6 back to C0
+	 */
+	else
+		return SUSPEND_THRESHOLD;
 
 	return 0;
 }
@@ -759,6 +772,84 @@ power_freq_scaleup_heuristic(unsigned lcore_id,
 	return FREQ_CURRENT;
 }
 
+/**
+ * force polling thread sleep until one-shot rx interrupt triggers
+ * @param num
+ *  number of rx queues the lcore waits on
+ * @return
+ *  0 on success
+ */
+static int
+sleep_until_rx_interrupt(int num)
+{
+	struct rte_epoll_event event[num];
+	int n, i;
+	uint8_t port_id, queue_id;
+	void *data;
+
+	RTE_LOG(INFO, L3FWD_POWER,
+		"lcore %u sleeps until interrupt triggers\n",
+		rte_lcore_id());
+
+	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, -1);
+	for (i = 0; i < n; i++) {
+		data = event[i].epdata.data;
+		port_id = ((uintptr_t)data) >> CHAR_BIT;
+		queue_id = ((uintptr_t)data) &
+			RTE_LEN2MASK(CHAR_BIT, uint8_t);
+		RTE_LOG(INFO, L3FWD_POWER,
+			"lcore %u is woken up from rx interrupt on"
+			" port %d queue %d\n",
+			rte_lcore_id(), port_id, queue_id);
+	}
+
+	return 0;
+}
+
+static void turn_on_intr(struct lcore_conf *qconf)
+{
+	int i;
+	struct lcore_rx_queue *rx_queue;
+	uint8_t port_id, queue_id;
+
+	for (i = 0; i < qconf->n_rx_queue; ++i) {
+		rx_queue = &(qconf->rx_queue_list[i]);
+		port_id = rx_queue->port_id;
+		queue_id = rx_queue->queue_id;
+
+		rte_spinlock_lock(&(locks[port_id]));
+		rte_eth_dev_rx_intr_enable(port_id, queue_id);
+		rte_spinlock_unlock(&(locks[port_id]));
+	}
+}
+
+static int event_register(struct lcore_conf *qconf)
+{
+	struct lcore_rx_queue *rx_queue;
+	uint8_t portid, queueid;
+	uint32_t data;
+	int ret;
+	int i;
+
+	for (i = 0; i < qconf->n_rx_queue; ++i) {
+		rx_queue = &(qconf->rx_queue_list[i]);
+		portid = rx_queue->port_id;
+		queueid = rx_queue->queue_id;
+		data = portid << CHAR_BIT | queueid;
+
+		ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
+						RTE_EPOLL_PER_THREAD,
+						RTE_INTR_EVENT_ADD,
+						(void *)((uintptr_t)data));
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /* main processing loop */
 static int
 main_loop(__attribute__((unused)) void *dummy)
@@ -772,9 +863,9 @@ main_loop(__attribute__((unused)) void *dummy)
 	struct lcore_conf *qconf;
 	struct lcore_rx_queue *rx_queue;
 	enum freq_scale_hint_t lcore_scaleup_hint;
-
 	uint32_t lcore_rx_idle_count = 0;
 	uint32_t lcore_idle_hint = 0;
+	int intr_en = 0;
 
 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
 					US_PER_S * BURST_TX_DRAIN_US;
@@ -791,13 +882,18 @@ main_loop(__attribute__((unused)) void *dummy)
 	RTE_LOG(INFO, L3FWD_POWER, "entering main loop on lcore %u\n", lcore_id);
 
 	for (i = 0; i < qconf->n_rx_queue; i++) {
-
 		portid = qconf->rx_queue_list[i].port_id;
 		queueid = qconf->rx_queue_list[i].queue_id;
 		RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%hhu "
 			"rxqueueid=%hhu\n", lcore_id, portid, queueid);
 	}
 
+	/* add into event wait list */
+	if (port_conf.intr_conf.rxq && event_register(qconf) == 0)
+		intr_en = 1;
+	else
+		RTE_LOG(INFO, L3FWD_POWER, "RX interrupt won't be enabled.\n");
+
 	while (1) {
 		stats[lcore_id].nb_iteration_looped++;
 
@@ -832,6 +928,7 @@ main_loop(__attribute__((unused)) void *dummy)
 			prev_tsc_power = cur_tsc_power;
 		}
 
+start_rx:
 		/*
 		 * Read packet from RX queues
 		 */
@@ -845,6 +942,7 @@ main_loop(__attribute__((unused)) void *dummy)
 
 			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
 								MAX_PKT_BURST);
+			stats[lcore_id].nb_rx_processed += nb_rx;
 			if (unlikely(nb_rx == 0)) {
 				/**
@@ -907,10 +1005,13 @@ main_loop(__attribute__((unused)) void *dummy)
 					rx_queue->freq_up_hint;
 			}
 
-			if (lcore_scaleup_hint == FREQ_HIGHEST)
-				rte_power_freq_max(lcore_id);
-			else if (lcore_scaleup_hint == FREQ_HIGHER)
-				rte_power_freq_up(lcore_id);
+			if (lcore_scaleup_hint == FREQ_HIGHEST) {
+				if (rte_power_freq_max)
+					rte_power_freq_max(lcore_id);
+			} else if (lcore_scaleup_hint == FREQ_HIGHER) {
+				if (rte_power_freq_up)
+					rte_power_freq_up(lcore_id);
+			}
 		} else {
 			/**
 			 * All Rx queues empty in recent consecutive polls,
@@ -925,16 +1026,23 @@ main_loop(__attribute__((unused)) void *dummy)
 					lcore_idle_hint = rx_queue->idle_hint;
 			}
 
-			if ( lcore_idle_hint < SLEEP_GEAR1_THRESHOLD)
+			if (lcore_idle_hint < SUSPEND_THRESHOLD)
 				/**
-				 * execute "pause" instruction to avoid context
-				 * switch for short sleep.
-				 */
+				 * execute "pause" instruction to avoid a
+				 * context switch, which generally takes
+				 * hundreds of microseconds, for short sleeps.
+				 */
 				rte_delay_us(lcore_idle_hint);
-			else
-				/* long sleep force runing thread to suspend */
-				usleep(lcore_idle_hint);
-
+			else {
+				/* suspend until rx interrupt triggers */
+				if (intr_en) {
+					turn_on_intr(qconf);
+					sleep_until_rx_interrupt(
+						qconf->n_rx_queue);
+				}
+				/* start receiving packets immediately */
+				goto start_rx;
+			}
 			stats[lcore_id].sleep_time += lcore_idle_hint;
 		}
 	}
@@ -1267,7 +1375,7 @@ setup_hash(int socketid)
 	char s[64];
 
 	/* create ipv4 hash */
-	snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
+	rte_snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
 	ipv4_l3fwd_hash_params.name = s;
 	ipv4_l3fwd_hash_params.socket_id = socketid;
 	ipv4_l3fwd_lookup_struct[socketid] =
@@ -1277,7 +1385,7 @@ setup_hash(int socketid)
 			"socket %d\n", socketid);
 
 	/* create ipv6 hash */
-	snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
+	rte_snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
 	ipv6_l3fwd_hash_params.name = s;
 	ipv6_l3fwd_hash_params.socket_id = socketid;
 	ipv6_l3fwd_lookup_struct[socketid] =
@@ -1471,6 +1579,7 @@ main(int argc, char **argv)
 	unsigned lcore_id;
 	uint64_t hz;
 	uint32_t n_tx_queue, nb_lcores;
+	uint32_t dev_rxq_num, dev_txq_num;
 	uint8_t portid, nb_rx_queue, queue, socketid;
 
 	/* catch SIGINT and restore cpufreq governor to ondemand */
@@ -1520,10 +1629,19 @@ main(int argc, char **argv)
 		printf("Initializing port %d ... ", portid );
", portid ); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + dev_rxq_num = dev_info.max_rx_queues; + dev_txq_num = dev_info.max_tx_queues; + nb_rx_queue = get_port_n_rx_queues(portid); + if (nb_rx_queue > dev_rxq_num) + rte_exit(EXIT_FAILURE, + "Cannot configure not existed rxq: " + "port=%d\n", portid); + n_tx_queue = nb_lcores; - if (n_tx_queue > MAX_TX_QUEUE_PER_PORT) - n_tx_queue = MAX_TX_QUEUE_PER_PORT; + if (n_tx_queue > dev_txq_num) + n_tx_queue = dev_txq_num; printf("Creating queues: nb_rxq=%d nb_txq=%u... ", nb_rx_queue, (unsigned)n_tx_queue ); ret = rte_eth_dev_configure(portid, nb_rx_queue, @@ -1547,6 +1665,9 @@ main(int argc, char **argv) if (rte_lcore_is_enabled(lcore_id) == 0) continue; + if (queueid >= dev_txq_num) + continue; + if (numa_on) socketid = \ (uint8_t)rte_lcore_to_socket_id(lcore_id); @@ -1581,8 +1702,9 @@ main(int argc, char **argv) /* init power management library */ ret = rte_power_init(lcore_id); if (ret) - rte_exit(EXIT_FAILURE, "Power management library " - "initialization failed on core%u\n", lcore_id); + rte_log(RTE_LOG_ERR, RTE_LOGTYPE_POWER, + "Power management library initialization " + "failed on core%u", lcore_id); /* init timer structures for each enabled lcore */ rte_timer_init(&power_timers[lcore_id]); @@ -1630,7 +1752,6 @@ main(int argc, char **argv) if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, " "port=%d\n", ret, portid); - /* * If enabled, put device in promiscuous mode. * This allows IO forwarding mode to forward packets @@ -1639,6 +1760,8 @@ main(int argc, char **argv) */ if (promiscuous_on) rte_eth_promiscuous_enable(portid); + /* initialize spinlock for each port */ + rte_spinlock_init(&(locks[portid])); } check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask); -- 1.8.1.4