From mboxrd@z Thu Jan 1 00:00:00 1970
From: "Zhang, Yuwei1"
To: "Ma, LihongX", "dts@dpdk.org"
Date: Thu, 7 Nov 2019 07:03:27 +0000
Subject: Re: [dts] [PATCH V1] tests/eventdev_pipeline: add test case to check load-balance behavior
In-Reply-To: <1572817690-3849-1-git-send-email-lihongx.ma@intel.com>
References: <1572817690-3849-1-git-send-email-lihongx.ma@intel.com>
List-Id: test suite reviews and discussions
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0

Acked-by: Yuwei Zhang

Regards,
Yuwei

-----Original Message-----
From: Ma, LihongX
Sent: Monday, November 4, 2019 5:48 AM
To: dts@dpdk.org
Cc: Zhang, Yuwei1; Ma, LihongX
Subject: [dts][PATCH V1] tests/eventdev_pipeline: add test case to check load-balance behavior

1. Optimize the code and use Packet to create and send packets.
2. Use the function create_eal_parameters to get the EAL parameters used to launch the app.
3. Add cases to check load-balance behavior with the default/order/parallel types.

Signed-off-by: lihong
---
 tests/TestSuite_eventdev_pipeline.py | 168 +++++++++++++++++++++++++----------
 1 file changed, 121 insertions(+), 47 deletions(-)

diff --git a/tests/TestSuite_eventdev_pipeline.py b/tests/TestSuite_eventdev_pipeline.py
index 9f723cd..1e99389 100644
--- a/tests/TestSuite_eventdev_pipeline.py
+++ b/tests/TestSuite_eventdev_pipeline.py
@@ -40,6 +40,7 @@ import re
 from test_case import TestCase
 import scapy.layers.inet
 from scapy.utils import rdpcap
+from packet import Packet
 
 
 class TestEventdevPipeline(TestCase):
@@ -50,7 +51,7 @@ class TestEventdevPipeline(TestCase):
         """
         self.works = 4
         self.packet_num = 96
-        self.core_config = "1S/7C/1T"
+        self.core_config = "1S/8C/1T"
         self.build_eventdev_app()
 
         self.dut_ports = self.dut.get_ports()
@@ -59,18 +60,17 @@ class TestEventdevPipeline(TestCase):
         self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
         self.core_list = self.dut.get_core_list(
             self.core_config, socket=self.ports_socket)
-        self.core_list_rx = self.core_list[0:1]
-        self.core_list_tx = self.core_list[1:2]
-        self.core_list_sd = self.core_list[2:3]
-        self.core_list_wk = self.core_list[3:7]
+        self.verify(len(self.core_list) >= 8, 'server does not have enough cores to run this suite')
+        self.core_list_rx = self.core_list[1:2]
+        self.core_list_tx = self.core_list[2:3]
+        self.core_list_sd = self.core_list[3:4]
+        self.core_list_wk = self.core_list[4:8]
         self.core_mask_rx = utils.create_mask(self.core_list_rx)
         self.core_mask_tx = utils.create_mask(self.core_list_tx)
         self.core_mask_sd = utils.create_mask(self.core_list_sd)
         self.core_mask_wk = utils.create_mask(self.core_list_wk)
 
-        self.core_list = ",".join(self.core_list)
-        pre = int(self.core_list[0]) - 1
-        self.core_list = str(pre) + "," + self.core_list
+        self.taskset_core_list = ",".join(self.core_list)
 
         self.rx_port = self.tester.get_local_port(self.dut_ports[0])
         self.tx_port = self.rx_port
@@ -82,7 +82,7 @@ class TestEventdevPipeline(TestCase):
         """
         Run before each test case.
""" - self.dut.send_expect("killall -s INT eventdev_pipeline", "#") + pass =20 def build_eventdev_app(self): self.app_command =3D "examples/eventdev_pipeline" @@ -95,12 +95,14 @@ class TestEventdevPipeline(TestCase): """ run eventdev_pipeline command """ + eal_params =3D self.dut.create_eal_parameters(cores=3Dself.core_li= st, + ports=3D[self.dut.ports_info[0]['pci']]) command_line =3D "taskset -c %s " + self.app_command + \ - "/build/app/eventdev_pipeline " + \ + "/build/app/eventdev_pipeline %s " + \ "--vdev event_sw0 -- -r%s -t%s -e%s -w %s -s1 -n0 -= c32 -W1000 %s -D" command_line =3D command_line % ( - self.core_list, self.core_mask_rx, self.core_mask_tx, - self.core_mask_sd, self.core_mask_wk, cmd_type) + self.taskset_core_list, eal_params, self.core_mask_rx, + self.core_mask_tx, self.core_mask_sd,=20 + self.core_mask_wk, cmd_type) self.dut.send_expect(command_line, "Port 0", 30) =20 out =3D self.dut.get_session_output() @@ -109,8 +111,8 @@ class Te= stEventdevPipeline(TestCase): self.verify("executing scheduler" in out, "lcore of scheduler not = right") self.verify("executing worker" in out, "lcore of worker not right"= ) =20 - def remove_dhcp_from_revpackets(self, inst): - pkts =3D self.tester.load_tcpdump_sniff_packets(inst) + def remove_dhcp_from_revpackets(self, inst, timeout=3D3): + pkts =3D self.tester.load_tcpdump_sniff_packets(inst, timeout) i =3D 0 while len(pkts) !=3D 0 and i <=3D len(pkts) - 1: if pkts[i].haslayer('DHCP'): @@ -119,7 +121,7 @@ class TestEventdevPipeline(TestCase): i =3D i + 1 return pkts =20 - def send_ordered_packet(self): + def send_ordered_packet(self, count=3D1): """ send the packets with ordered of src-ip info worker dequeue depth of 32, so the packet number is multiple of 32= is better @@ -129,37 +131,58 @@ class TestEventdevPipeline(TestCase): if has eight flow, the pcap has 8 couples with diff 5 tuple, and e= ach couple load info from 000001 to 000012 """ + pkt =3D Packet() for queue in range(self.queues): - src_ip =3D "11.12.13.%d" % (queue+1) - pay_load =3D "000001" - flow_info =3D 'flow1 =3D [Ether(dst=3D"%s",src=3D"%s")/IP(src= =3D"%s")/UDP(sport=3D123, dport=3D12)/("%s")]' - self.tester.scapy_append(flow_info % (self.d_mac, self.s_mac, = src_ip, pay_load)) - for i in range(1, self.packet_num/self.queues): - pay_load =3D "0000%.2d" % (i+1) - self.tester.scapy_append('flow_temp =3D [Ether(dst=3D"%s",= src=3D"%s")/IP(src=3D"%s")/UDP(sport=3D123, dport=3D12)/("%s")]' - % (self.d_mac, self.s_mac, src_ip,= pay_load)) - if i =3D=3D 1: - self.tester.scapy_append('flow2 =3D flow_temp') - else: - self.tester.scapy_append('flow2 =3D flow2 + flow_temp'= ) - if queue =3D=3D 0: - self.tester.scapy_append('flow =3D flow1 + flow2') - else: - self.tester.scapy_append('flow =3D flow + flow1 + flow2') - - self.tester.scapy_append('wrpcap("pipeline.pcap", flow)') - self.tester.scapy_execute() - time.sleep(5) + config_opt =3D [('ether', {'dst': self.d_mac, 'src': self.s_ma= c, 'src': self.s_mac}), + ('ipv4', {'src': '11.12.13.%d' % (queue+1), 'dst':= '11.12.1.1'}), + ('udp', {'src': 123, 'dst': 12})] + # if only one queue, create self.packet_num with same 5 tuple + # if multi queue, create self.packet_num with diff 5 tuple, + # each tuple have (self.packet_num//self.queues) pkts + pkt_num =3D self.packet_num//self.queues + pkt.generate_random_pkts(pktnum=3Dpkt_num, random_type=3D['UDP= '], ip_increase=3DFalse, + random_payload=3DFalse, options=3D{'layers= _config': config_opt}) + # config raw info in pkts + for i in range(pkt_num): + payload =3D "0000%.2d" % (i+1) 
+                pkt.pktgen.pkts[i + pkt_num*queue]['Raw'].load = payload
 
         filt = [{'layer': 'ether', 'config': {'src': '%s' % self.s_mac}}]
         inst = self.tester.tcpdump_sniff_packets(self.rx_interface, filters=filt)
-        self.tester.scapy_append('pkt=rdpcap("pipeline.pcap")')
-        self.tester.scapy_append('sendp(pkt, iface="%s")' % self.tx_interface)
-        self.tester.scapy_execute()
-        time.sleep(5)
+        pkt.send_pkt(crb=self.tester, tx_port=self.tx_interface,
+                     count=count, timeout=300)
         self.pkts = self.remove_dhcp_from_revpackets(inst)
 
-    def check_packet_order(self):
+    def check_load_balance_behavior(self, case_info):
+        """
+        check the load-balance behavior by the workload of every worker
+        the number of sent pkts is 96*100, so the number of pkts received by
+        each worker should be smaller than 2760 and greater than 2040
+        """
+        self.send_ordered_packet(count=100)
+        # exit the eventdev_pipeline app
+        # and get the output info
+        self.dut.send_expect('^c', 'Signal')
+        out = self.dut.get_session_output(timeout=3)
+        work_rx = []
+        for wk in self.core_list_wk:
+            one_info = re.search('worker\s*%s\s*thread done.\s*RX=(\d*)\s*TX=(\d*)' % str(wk), out)
+            self.verify(one_info is not None and len(one_info.groups()) == 2
+                        and int(one_info.group(1)) > 0,
+                        "%s can not get the worker rx and tx packets info from output" % case_info)
+            work_info = {'work': int(wk), 'rx': int(one_info.group(1)), 'tx': int(one_info.group(2))}
+            work_rx.append(work_info)
+        # get all received pkts
+        all_rx = 0
+        for wk in work_rx:
+            all_rx += wk['rx']
+        ave_rx = all_rx//len(work_rx)
+        for wk in work_rx:
+            self.verify(wk['rx'] <= ave_rx + ave_rx*0.15 and wk['rx'] >= ave_rx - ave_rx*0.15,
+                '%s : the work thread rx is not balanced, all_rx: %d, work %d rx is %d' % (
+                case_info, all_rx, wk['work'], wk['rx']))
+            self.logger.info('%s : worker thread %d received %d pkts' %
+                             (case_info, wk['work'], wk['rx']))
+
+    def check_packet_order(self, case_info):
         """
         observe the packets sended by scapy, check the packets order
         """
@@ -176,7 +199,7 @@ class TestEventdevPipeline(TestCase):
                 packet_index = int(self.pkts[i]['Raw'].load[-2:])
                 pay_load = "0000%.2d" % (packet_index)
                 self.verify(self.pkts[i]['Raw'].load == pay_load,
-                            "The packets not ordered")
+                            "%s : The packets not ordered" % case_info)
                 packet_index = packet_index + 1
 
     def test_keep_packet_order_with_ordered_stage(self):
@@ -185,32 +208,83 @@ class TestEventdevPipeline(TestCase):
         according to the tcpdump may be capture the packets whitch not belong
         current flow, so set different src_mac of flow to identify the packets
         """
+        self.logger.info('check keep packet order about single-flow')
         self.lanuch_eventdev_pipeline("-o")
         self.queues = 1
         self.s_mac = "00:00:00:00:00:00"
-        self.check_packet_order()
+        self.check_packet_order('single-flow')
+        self.logger.info('check keep packet order about multi-flow')
         self.s_mac = "00:00:00:00:00:01"
         self.queues = 8
-        self.check_packet_order()
+        self.check_packet_order('multi-flow')
 
     def test_keep_packet_order_with_default_stage(self):
         """
         keep the packets order with atomic stage in single-flow and multi-flow
         """
+        self.logger.info('check keep packet order about single-flow')
         self.lanuch_eventdev_pipeline(" ")
         self.queues = 1
         self.s_mac = "00:00:00:00:00:02"
-        self.check_packet_order()
+        self.check_packet_order('single-flow')
+        self.logger.info('check keep packet order about multi-flow')
         self.s_mac = "00:00:00:00:00:03"
         self.queues = 8
-        self.check_packet_order()
+        self.check_packet_order('multi-flow')
+
+    def test_check_load_balance_behavior_with_default_type(self):
+        """
+        Check load-balance behavior with default type in single-flow and multi-flow situations
+        """
+        self.logger.info('check load balance about single-flow')
+        self.lanuch_eventdev_pipeline(" ")
+        self.queues = 1
+        self.s_mac = "00:00:00:00:00:04"
+        self.check_load_balance_behavior('single-flow')
+
+        self.logger.info('check load balance about multi-flow')
+        self.lanuch_eventdev_pipeline(" ")
+        self.queues = 8
+        self.s_mac = "00:00:00:00:00:05"
+        self.check_load_balance_behavior('multi-flow')
+
+    def test_check_load_balance_behavior_with_order_type(self):
+        """
+        Check load-balance behavior with order type stage in single-flow and multi-flow situations
+        """
+        self.logger.info('check load balance about single-flow')
+        self.lanuch_eventdev_pipeline("-o")
+        self.queues = 1
+        self.s_mac = "00:00:00:00:00:06"
+        self.check_load_balance_behavior('single-flow')
+
+        self.logger.info('check load balance about multi-flow')
+        self.lanuch_eventdev_pipeline("-o")
+        self.queues = 8
+        self.s_mac = "00:00:00:00:00:07"
+        self.check_load_balance_behavior('multi-flow')
+
+    def test_check_load_balance_behavior_with_parallel_type(self):
+        """
+        Check load-balance behavior with parallel type stage in single-flow and multi-flow situations
+        """
+        self.logger.info('check load balance about single-flow')
+        self.lanuch_eventdev_pipeline("-p")
+        self.queues = 1
+        self.s_mac = "00:00:00:00:00:08"
+        self.check_load_balance_behavior('single-flow')
+
+        self.logger.info('check load balance about multi-flow')
+        self.lanuch_eventdev_pipeline("-p")
+        self.queues = 8
+        self.s_mac = "00:00:00:00:00:09"
+        self.check_load_balance_behavior('multi-flow')
 
     def tear_down(self):
         """
         Run after each test case.
         """
-        self.dut.send_expect("^c", "#", 10)
-        self.dut.send_expect("killall -s INT eventdev_pipeline", "#")
+        self.dut.kill_all()
        time.sleep(5)
 
     def tear_down_all(self):
-- 
2.7.4
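
Aside, for readers of this test: the balance criterion in check_load_balance_behavior reduces to a +/-15% band around the average per-worker RX count. Below is a minimal standalone sketch of just that check; the per-worker counters are hypothetical sample values, not output from a real run (in the suite they are parsed from the app output with re.search):

    # Hypothetical per-worker RX counters, keyed by worker core id.
    worker_rx = {4: 2450, 5: 2380, 6: 2520, 7: 2250}

    # Each worker's RX count must stay within 15% of the average.
    ave_rx = sum(worker_rx.values()) // len(worker_rx)
    for core, rx in worker_rx.items():
        assert ave_rx - ave_rx * 0.15 <= rx <= ave_rx + ave_rx * 0.15, \
            'worker on core %d rx %d is outside +/-15%% of average %d' % (core, rx, ave_rx)

With 96*100 = 9600 packets spread over 4 workers the average is 2400, which is where the 2040/2760 bounds quoted in the docstring of check_load_balance_behavior come from.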