From: Harry van Haaren
To: dev@dpdk.org
Cc: jerin.jacob@caviumnetworks.com, Harry van Haaren, Bruce Richardson, David Hunt
Date: Thu, 30 Mar 2017 00:25:58 +0100
Message-Id: <1490829963-106807-17-git-send-email-harry.van.haaren@intel.com>
In-Reply-To: <1490829963-106807-1-git-send-email-harry.van.haaren@intel.com>
References: <1490374395-149320-1-git-send-email-harry.van.haaren@intel.com>
 <1490829963-106807-1-git-send-email-harry.van.haaren@intel.com>
Subject: [dpdk-dev] [PATCH v6 16/21] test/eventdev: add basic SW tests

This commit adds basic enqueue and dequeue unit tests, some negative tests
using invalid queue IDs and release events, and port/queue configuration and
reconfiguration tests for the software eventdev PMD.
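
As context for reviewers, every test below follows the same basic flow:
enqueue on a port, call rte_event_schedule() (the sw PMD only moves events
when the scheduler is explicitly invoked), then dequeue on a linked port.
A minimal sketch of that flow is shown here; it is not part of the diff, and
the helper name, the "evdev"/"port" parameters and the pre-allocated mbuf "m"
are illustrative assumptions (a configured and started device with the port
linked to queue 0):

#include <rte_eventdev.h>
#include <rte_mbuf.h>

/* Hypothetical helper, not in this patch: enqueue one NEW event,
 * run the sw scheduler, then dequeue the event again.
 */
static int
sketch_enq_sched_deq(uint8_t evdev, uint8_t port, struct rte_mbuf *m)
{
	struct rte_event ev = {
		.op = RTE_EVENT_OP_NEW,
		.queue_id = 0,
		.flow_id = 0,
		.mbuf = m,
	};

	/* enqueue a single event to the given port */
	if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1)
		return -1;

	/* the sw PMD schedules events only on this explicit call */
	rte_event_schedule(evdev);

	/* dequeue it back; timeout 0 means do not wait */
	if (rte_event_dequeue_burst(evdev, port, &ev, 1, 0) != 1)
		return -1;

	rte_pktmbuf_free(ev.mbuf);
	return 0;
}
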
Signed-off-by: Bruce Richardson Signed-off-by: David Hunt Signed-off-by: Harry van Haaren Acked-by: Anatoly Burakov --- test/test/test_eventdev_sw.c | 1060 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1060 insertions(+) diff --git a/test/test/test_eventdev_sw.c b/test/test/test_eventdev_sw.c index 808b7b3..f294cb9 100644 --- a/test/test/test_eventdev_sw.c +++ b/test/test/test_eventdev_sw.c @@ -64,6 +64,8 @@ struct test { int nb_qids; }; +static struct rte_event release_ev; + static inline struct rte_mbuf * rte_gen_arp(int portid, struct rte_mempool *mp) { @@ -307,12 +309,1004 @@ test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats) return 0; } +static int +test_single_directed_packet(struct test *t) +{ + const int rx_enq = 0; + const int wrk_enq = 2; + int err; + + /* Create instance with 3 directed QIDs going to 3 ports */ + if (init(t, 3, 3) < 0 || + create_ports(t, 3) < 0 || + create_directed_qids(t, 3, t->port) < 0) + return -1; + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /************** FORWARD ****************/ + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + struct rte_event ev = { + .op = RTE_EVENT_OP_NEW, + .queue_id = wrk_enq, + .mbuf = arp, + }; + + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + + const uint32_t MAGIC_SEQN = 4711; + arp->seqn = MAGIC_SEQN; + + /* generate pkt and enqueue */ + err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1); + if (err < 0) { + printf("%d: error failed to enqueue\n", __LINE__); + return -1; + } + + /* Run schedule() as dir packets may need to be re-ordered */ + rte_event_schedule(evdev); + + struct test_event_dev_stats stats; + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: error failed to get stats\n", __LINE__); + return -1; + } + + if (stats.port_rx_pkts[rx_enq] != 1) { + printf("%d: error stats incorrect for directed port\n", + __LINE__); + return -1; + } + + uint32_t deq_pkts; + deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0); + if (deq_pkts != 1) { + printf("%d: error failed to deq\n", __LINE__); + return -1; + } + + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_rx_pkts[wrk_enq] != 0 && + stats.port_rx_pkts[wrk_enq] != 1) { + printf("%d: error directed stats post-dequeue\n", __LINE__); + return -1; + } + + if (ev.mbuf->seqn != MAGIC_SEQN) { + printf("%d: error magic sequence number not dequeued\n", + __LINE__); + return -1; + } + + rte_pktmbuf_free(ev.mbuf); + cleanup(t); + return 0; +} + +static int +burst_packets(struct test *t) +{ + /************** CONFIG ****************/ + uint32_t i; + int err; + int ret; + + /* Create instance with 2 ports and 2 queues */ + if (init(t, 2, 2) < 0 || + create_ports(t, 2) < 0 || + create_atomic_qids(t, 2) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1); + if (ret != 1) { + printf("%d: error mapping lb qid0\n", __LINE__); + return -1; + } + ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1); + if (ret != 1) { + printf("%d: error mapping lb qid1\n", __LINE__); + return -1; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /************** FORWARD ****************/ + const uint32_t rx_port = 0; + const uint32_t NUM_PKTS = 2; + + for (i = 0; i < NUM_PKTS; i++) { + struct rte_mbuf *arp = rte_gen_arp(0, 
t->mbuf_pool); + if (!arp) { + printf("%d: error generating pkt\n", __LINE__); + return -1; + } + + struct rte_event ev = { + .op = RTE_EVENT_OP_NEW, + .queue_id = i % 2, + .flow_id = i % 3, + .mbuf = arp, + }; + /* generate pkt and enqueue */ + err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + rte_event_schedule(evdev); + + /* Check stats for all NUM_PKTS arrived to sched core */ + struct test_event_dev_stats stats; + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + return -1; + } + if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) { + printf("%d: Sched core didn't receive all %d pkts\n", + __LINE__, NUM_PKTS); + rte_event_dev_dump(evdev, stdout); + return -1; + } + + uint32_t deq_pkts; + int p; + + deq_pkts = 0; + /******** DEQ QID 1 *******/ + do { + struct rte_event ev; + p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0); + deq_pkts += p; + rte_pktmbuf_free(ev.mbuf); + } while (p); + + if (deq_pkts != NUM_PKTS/2) { + printf("%d: Half of NUM_PKTS didn't arrive at port 1\n", + __LINE__); + return -1; + } + + /******** DEQ QID 2 *******/ + deq_pkts = 0; + do { + struct rte_event ev; + p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0); + deq_pkts += p; + rte_pktmbuf_free(ev.mbuf); + } while (p); + if (deq_pkts != NUM_PKTS/2) { + printf("%d: Half of NUM_PKTS didn't arrive at port 2\n", + __LINE__); + return -1; + } + + cleanup(t); + return 0; +} + +static int +abuse_inflights(struct test *t) +{ + const int rx_enq = 0; + const int wrk_enq = 2; + int err; + + /* Create instance with 4 ports */ + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + cleanup(t); + return -1; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /* Enqueue op only */ + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + + /* schedule */ + rte_event_schedule(evdev); + + struct test_event_dev_stats stats; + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + return -1; + } + + if (stats.rx_pkts != 0 || + stats.tx_pkts != 0 || + stats.port_inflight[wrk_enq] != 0) { + printf("%d: Sched core didn't handle pkt as expected\n", + __LINE__); + return -1; + } + + cleanup(t); + return 0; +} + +static int +port_reconfig_credits(struct test *t) +{ + if (init(t, 1, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + uint32_t i; + const uint32_t NUM_ITERS = 32; + for (i = 0; i < NUM_ITERS; i++) { + const struct rte_event_queue_conf conf = { + .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .nb_atomic_flows = 1024, + .nb_atomic_order_sequences = 1024, + }; + if (rte_event_queue_setup(evdev, 0, &conf) < 0) { + printf("%d: error creating qid\n", __LINE__); + return -1; + } + t->qid[0] = 0; + + static const struct rte_event_port_conf port_conf = { + .new_event_threshold = 128, + .dequeue_depth = 32, + .enqueue_depth = 64, + }; + if (rte_event_port_setup(evdev, 0, 
&port_conf) < 0) { + printf("%d Error setting up port\n", __LINE__); + return -1; + } + + int links = rte_event_port_link(evdev, 0, NULL, NULL, 0); + if (links != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + goto fail; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + goto fail; + } + + const uint32_t NPKTS = 1; + uint32_t j; + for (j = 0; j < NPKTS; j++) { + struct rte_event ev; + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + goto fail; + } + ev.queue_id = t->qid[0]; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = arp; + int err = rte_event_enqueue_burst(evdev, 0, &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + rte_event_dev_dump(0, stdout); + goto fail; + } + } + + rte_event_schedule(evdev); + + struct rte_event ev[NPKTS]; + int deq = rte_event_dequeue_burst(evdev, t->port[0], ev, + NPKTS, 0); + if (deq != 1) + printf("%d error; no packet dequeued\n", __LINE__); + + /* let cleanup below stop the device on last iter */ + if (i != NUM_ITERS-1) + rte_event_dev_stop(evdev); + } + + cleanup(t); + return 0; +fail: + cleanup(t); + return -1; +} + +static int +port_single_lb_reconfig(struct test *t) +{ + if (init(t, 2, 2) < 0) { + printf("%d: Error initializing device\n", __LINE__); + goto fail; + } + + static const struct rte_event_queue_conf conf_lb_atomic = { + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY, + .nb_atomic_flows = 1024, + .nb_atomic_order_sequences = 1024, + }; + if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) { + printf("%d: error creating qid\n", __LINE__); + goto fail; + } + + static const struct rte_event_queue_conf conf_single_link = { + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK, + .nb_atomic_flows = 1024, + .nb_atomic_order_sequences = 1024, + }; + if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) { + printf("%d: error creating qid\n", __LINE__); + goto fail; + } + + struct rte_event_port_conf port_conf = { + .new_event_threshold = 128, + .dequeue_depth = 32, + .enqueue_depth = 64, + }; + if (rte_event_port_setup(evdev, 0, &port_conf) < 0) { + printf("%d Error setting up port\n", __LINE__); + goto fail; + } + if (rte_event_port_setup(evdev, 1, &port_conf) < 0) { + printf("%d Error setting up port\n", __LINE__); + goto fail; + } + + /* link port to lb queue */ + uint8_t queue_id = 0; + if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) { + printf("%d: error creating link for qid\n", __LINE__); + goto fail; + } + + int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1); + if (ret != 1) { + printf("%d: Error unlinking lb port\n", __LINE__); + goto fail; + } + + queue_id = 1; + if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) { + printf("%d: error creating link for qid\n", __LINE__); + goto fail; + } + + queue_id = 0; + int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + goto fail; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + goto fail; + } + + cleanup(t); + return 0; +fail: + cleanup(t); + return -1; +} + +static int +ordered_reconfigure(struct test *t) +{ + if (init(t, 1, 1) < 0 || + create_ports(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + const struct rte_event_queue_conf conf = { + .event_queue_cfg = 
RTE_EVENT_QUEUE_CFG_ORDERED_ONLY, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .nb_atomic_flows = 1024, + .nb_atomic_order_sequences = 1024, + }; + + if (rte_event_queue_setup(evdev, 0, &conf) < 0) { + printf("%d: error creating qid\n", __LINE__); + goto failed; + } + + if (rte_event_queue_setup(evdev, 0, &conf) < 0) { + printf("%d: error creating qid, for 2nd time\n", __LINE__); + goto failed; + } + + rte_event_port_link(evdev, t->port[0], NULL, NULL, 0); + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + cleanup(t); + return 0; +failed: + cleanup(t); + return -1; +} + +static int +invalid_qid(struct test *t) +{ + struct test_event_dev_stats stats; + const int rx_enq = 0; + int err; + uint32_t i; + + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + for (i = 0; i < 4; i++) { + err = rte_event_port_link(evdev, t->port[i], &t->qid[0], + NULL, 1); + if (err != 1) { + printf("%d: error mapping port 1 qid\n", __LINE__); + return -1; + } + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /* + * Send in a packet with an invalid qid to the scheduler. + * We should see the packed enqueued OK, but the inflights for + * that packet should not be incremented, and the rx_dropped + * should be incremented. + */ + static uint32_t flows1[] = {20}; + + for (i = 0; i < RTE_DIM(flows1); i++) { + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + + struct rte_event ev = { + .op = RTE_EVENT_OP_NEW, + .queue_id = t->qid[0] + flows1[i], + .flow_id = i, + .mbuf = arp, + }; + /* generate pkt and enqueue */ + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + + /* call the scheduler */ + rte_event_schedule(evdev); + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + return -1; + } + + /* + * Now check the resulting inflights on the port, and the rx_dropped. 
+ */ + if (stats.port_inflight[0] != 0) { + printf("%d:%s: port 1 inflight count not correct\n", __LINE__, + __func__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + if (stats.port_rx_dropped[0] != 1) { + printf("%d:%s: port 1 drops\n", __LINE__, __func__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + /* each packet drop should only be counted in one place - port or dev */ + if (stats.rx_dropped != 0) { + printf("%d:%s: port 1 dropped count not correct\n", __LINE__, + __func__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + + cleanup(t); + return 0; +} + +static int +single_packet(struct test *t) +{ + const uint32_t MAGIC_SEQN = 7321; + struct rte_event ev; + struct test_event_dev_stats stats; + const int rx_enq = 0; + const int wrk_enq = 2; + int err; + + /* Create instance with 4 ports */ + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + cleanup(t); + return -1; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /************** Gen pkt and enqueue ****************/ + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + + ev.op = RTE_EVENT_OP_NEW; + ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL; + ev.mbuf = arp; + ev.queue_id = 0; + ev.flow_id = 3; + arp->seqn = MAGIC_SEQN; + + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + + rte_event_schedule(evdev); + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + return -1; + } + + if (stats.rx_pkts != 1 || + stats.tx_pkts != 1 || + stats.port_inflight[wrk_enq] != 1) { + printf("%d: Sched core didn't handle pkt as expected\n", + __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + + uint32_t deq_pkts; + + deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0); + if (deq_pkts < 1) { + printf("%d: Failed to deq\n", __LINE__); + return -1; + } + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + return -1; + } + + err = test_event_dev_stats_get(evdev, &stats); + if (ev.mbuf->seqn != MAGIC_SEQN) { + printf("%d: magic sequence number not dequeued\n", __LINE__); + return -1; + } + + rte_pktmbuf_free(ev.mbuf); + err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + rte_event_schedule(evdev); + + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_inflight[wrk_enq] != 0) { + printf("%d: port inflight not correct\n", __LINE__); + return -1; + } + + cleanup(t); + return 0; +} + +static int +inflight_counts(struct test *t) +{ + struct rte_event ev; + struct test_event_dev_stats stats; + const int rx_enq = 0; + const int p1 = 1; + const int p2 = 2; + int err; + int i; + + /* Create instance with 4 ports */ + if (init(t, 2, 3) < 0 || + create_ports(t, 3) < 0 || + create_atomic_qids(t, 2) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], 
NULL, 1); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + cleanup(t); + return -1; + } + err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + cleanup(t); + return -1; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /************** FORWARD ****************/ +#define QID1_NUM 5 + for (i = 0; i < QID1_NUM; i++) { + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + goto err; + } + + ev.queue_id = t->qid[0]; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = arp; + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + goto err; + } + } +#define QID2_NUM 3 + for (i = 0; i < QID2_NUM; i++) { + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + goto err; + } + ev.queue_id = t->qid[1]; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = arp; + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + goto err; + } + } + + /* schedule */ + rte_event_schedule(evdev); + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + goto err; + } + + if (stats.rx_pkts != QID1_NUM + QID2_NUM || + stats.tx_pkts != QID1_NUM + QID2_NUM) { + printf("%d: Sched core didn't handle pkt as expected\n", + __LINE__); + goto err; + } + + if (stats.port_inflight[p1] != QID1_NUM) { + printf("%d: %s port 1 inflight not correct\n", __LINE__, + __func__); + goto err; + } + if (stats.port_inflight[p2] != QID2_NUM) { + printf("%d: %s port 2 inflight not correct\n", __LINE__, + __func__); + goto err; + } + + /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/ + /* port 1 */ + struct rte_event events[QID1_NUM + QID2_NUM]; + uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events, + RTE_DIM(events), 0); + + if (deq_pkts != QID1_NUM) { + printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__); + goto err; + } + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_inflight[p1] != QID1_NUM) { + printf("%d: port 1 inflight decrement after DEQ != 0\n", + __LINE__); + goto err; + } + for (i = 0; i < QID1_NUM; i++) { + err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev, + 1); + if (err != 1) { + printf("%d: %s rte enqueue of inf release failed\n", + __LINE__, __func__); + goto err; + } + } + + /* + * As the scheduler core decrements inflights, it needs to run to + * process packets to act on the drop messages + */ + rte_event_schedule(evdev); + + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_inflight[p1] != 0) { + printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__); + goto err; + } + + /* port2 */ + deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events, + RTE_DIM(events), 0); + if (deq_pkts != QID2_NUM) { + printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__); + goto err; + } + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_inflight[p2] != QID2_NUM) { + printf("%d: port 1 inflight decrement after DEQ != 0\n", + __LINE__); + goto err; + } + for (i = 0; i < QID2_NUM; i++) { + err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev, + 1); + if (err != 1) { + printf("%d: %s rte enqueue of inf release failed\n", + __LINE__, __func__); + goto err; 
+ } + } + + /* + * As the scheduler core decrements inflights, it needs to run to + * process packets to act on the drop messages + */ + rte_event_schedule(evdev); + + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_inflight[p2] != 0) { + printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__); + goto err; + } + cleanup(t); + return 0; + +err: + rte_event_dev_dump(evdev, stdout); + cleanup(t); + return -1; +} + +static int +parallel_basic(struct test *t, int check_order) +{ + const uint8_t rx_port = 0; + const uint8_t w1_port = 1; + const uint8_t w3_port = 3; + const uint8_t tx_port = 4; + int err; + int i; + uint32_t deq_pkts, j; + struct rte_mbuf *mbufs[3]; + struct rte_mbuf *mbufs_out[3]; + const uint32_t MAGIC_SEQN = 1234; + + /* Create instance with 4 ports */ + if (init(t, 2, tx_port + 1) < 0 || + create_ports(t, tx_port + 1) < 0 || + (check_order ? create_ordered_qids(t, 1) : + create_unordered_qids(t, 1)) < 0 || + create_directed_qids(t, 1, &tx_port)) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* + * CQ mapping to QID + * We need three ports, all mapped to the same ordered qid0. Then we'll + * take a packet out to each port, re-enqueue in reverse order, + * then make sure the reordering has taken place properly when we + * dequeue from the tx_port. + * + * Simplified test setup diagram: + * + * rx_port w1_port + * \ / \ + * qid0 - w2_port - qid1 + * \ / \ + * w3_port tx_port + */ + /* CQ mapping to QID for LB ports (directed mapped on create) */ + for (i = w1_port; i <= w3_port; i++) { + err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL, + 1); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + cleanup(t); + return -1; + } + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /* Enqueue 3 packets to the rx port */ + for (i = 0; i < 3; i++) { + struct rte_event ev; + mbufs[i] = rte_gen_arp(0, t->mbuf_pool); + if (!mbufs[i]) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + + ev.queue_id = t->qid[0]; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = mbufs[i]; + mbufs[i]->seqn = MAGIC_SEQN + i; + + /* generate pkt and enqueue */ + err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue pkt %u, retval = %u\n", + __LINE__, i, err); + return -1; + } + } + + rte_event_schedule(evdev); + + /* use extra slot to make logic in loops easier */ + struct rte_event deq_ev[w3_port + 1]; + + /* Dequeue the 3 packets, one from each worker port */ + for (i = w1_port; i <= w3_port; i++) { + deq_pkts = rte_event_dequeue_burst(evdev, t->port[i], + &deq_ev[i], 1, 0); + if (deq_pkts != 1) { + printf("%d: Failed to deq\n", __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + } + + /* Enqueue each packet in reverse order, flushing after each one */ + for (i = w3_port; i >= w1_port; i--) { + + deq_ev[i].op = RTE_EVENT_OP_FORWARD; + deq_ev[i].queue_id = t->qid[1]; + err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + rte_event_schedule(evdev); + + /* dequeue from the tx ports, we should get 3 packets */ + deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev, + 3, 0); + + /* Check to see if we've got all 3 packets */ + if (deq_pkts != 3) { + printf("%d: expected 3 pkts at tx port got %d from port %d\n", + __LINE__, deq_pkts, tx_port); + rte_event_dev_dump(evdev, stdout); 
+ return 1; + } + + /* Check to see if the sequence numbers are in expected order */ + if (check_order) { + for (j = 0 ; j < deq_pkts ; j++) { + if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) { + printf( + "%d: Incorrect sequence number(%d) from port %d\n", + __LINE__, mbufs_out[j]->seqn, tx_port); + return -1; + } + } + } + + /* Destroy the instance */ + cleanup(t); + return 0; +} + +static int +ordered_basic(struct test *t) +{ + return parallel_basic(t, 1); +} + +static int +unordered_basic(struct test *t) +{ + return parallel_basic(t, 0); +} + static struct rte_mempool *eventdev_func_mempool; static int test_sw_eventdev(void) { struct test *t = malloc(sizeof(struct test)); + int ret; + + /* manually initialize the op, older gcc's complain on static + * initialization of struct elements that are a bitfield. + */ + release_ev.op = RTE_EVENT_OP_RELEASE; const char *eventdev_name = "event_sw0"; evdev = rte_event_dev_get_dev_id(eventdev_name); @@ -346,6 +1340,72 @@ test_sw_eventdev(void) } t->mbuf_pool = eventdev_func_mempool; + printf("*** Running Single Directed Packet test...\n"); + ret = test_single_directed_packet(t); + if (ret != 0) { + printf("ERROR - Single Directed Packet test FAILED.\n"); + return ret; + } + printf("*** Running Single Load Balanced Packet test...\n"); + ret = single_packet(t); + if (ret != 0) { + printf("ERROR - Single Packet test FAILED.\n"); + return ret; + } + printf("*** Running Unordered Basic test...\n"); + ret = unordered_basic(t); + if (ret != 0) { + printf("ERROR - Unordered Basic test FAILED.\n"); + return ret; + } + printf("*** Running Ordered Basic test...\n"); + ret = ordered_basic(t); + if (ret != 0) { + printf("ERROR - Ordered Basic test FAILED.\n"); + return ret; + } + printf("*** Running Burst Packets test...\n"); + ret = burst_packets(t); + if (ret != 0) { + printf("ERROR - Burst Packets test FAILED.\n"); + return ret; + } + printf("*** Running Invalid QID test...\n"); + ret = invalid_qid(t); + if (ret != 0) { + printf("ERROR - Invalid QID test FAILED.\n"); + return ret; + } + printf("*** Running Inflight Count test...\n"); + ret = inflight_counts(t); + if (ret != 0) { + printf("ERROR - Inflight Count test FAILED.\n"); + return ret; + } + printf("*** Running Abuse Inflights test...\n"); + ret = abuse_inflights(t); + if (ret != 0) { + printf("ERROR - Abuse Inflights test FAILED.\n"); + return ret; + } + printf("*** Running Ordered Reconfigure test...\n"); + ret = ordered_reconfigure(t); + if (ret != 0) { + printf("ERROR - Ordered Reconfigure test FAILED.\n"); + return ret; + } + printf("*** Running Port LB Single Reconfig test...\n"); + ret = port_single_lb_reconfig(t); + if (ret != 0) { + printf("ERROR - Port LB Single Reconfig test FAILED.\n"); + return ret; + } + printf("*** Running Port Reconfig Credits test...\n"); + ret = port_reconfig_credits(t); + if (ret != 0) { + printf("ERROR - Port Reconfig Credits Reset test FAILED.\n"); + return ret; + } /* * Free test instance, leaving mempool initialized, and a pointer to it * in static eventdev_func_mempool, as it is re-used on re-runs -- 2.7.4