From mboxrd@z Thu Jan 1 00:00:00 1970
From: "Tu, Lijuan"
To: "Ma, LihongX", "dts@dpdk.org"
CC: "Zhang, Yuwei1", "Ma, LihongX"
Date: Fri, 22 Nov 2019 05:33:59 +0000
Message-ID: <8CE3E05A3F976642AAB0F4675D0AD20E0BB676E2@SHSMSX101.ccr.corp.intel.com>
References: <1572817690-3849-1-git-send-email-lihongx.ma@intel.com>
In-Reply-To: <1572817690-3849-1-git-send-email-lihongx.ma@intel.com>
Subject: Re: [dts] [PATCH V1] tests/eventdev_pipeline: add test case about check load-balance behavior
List-Id: test suite reviews and discussions
Sender: "dts" <dts-bounces@dpdk.org>

Applied, thanks

> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of lihong
> Sent: Monday, November 4, 2019 5:48 AM
> To: dts@dpdk.org
> Cc: Zhang, Yuwei1; Ma, LihongX
> Subject: [dts] [PATCH V1] tests/eventdev_pipeline: add test case about check load-balance behavior
>
> 1. optimize the code and use Packet to create/send pkts
> 2. use the function create_eal_parameters to get the eal params to launch the app
> 3. add cases to check load-balance behavior with default/order/parallel types
>
> Signed-off-by: lihong
> ---
>  tests/TestSuite_eventdev_pipeline.py | 168 ++++++++++++++++++++++++---------
>  1 file changed, 121 insertions(+), 47 deletions(-)
>
> diff --git a/tests/TestSuite_eventdev_pipeline.py b/tests/TestSuite_eventdev_pipeline.py
> index 9f723cd..1e99389 100644
> --- a/tests/TestSuite_eventdev_pipeline.py
> +++ b/tests/TestSuite_eventdev_pipeline.py
> @@ -40,6 +40,7 @@ import re
>  from test_case import TestCase
>  import scapy.layers.inet
>  from scapy.utils import rdpcap
> +from packet import Packet
>
>
>  class TestEventdevPipeline(TestCase):
> @@ -50,7 +51,7 @@ class TestEventdevPipeline(TestCase):
>          """
>          self.works = 4
>          self.packet_num = 96
> -        self.core_config = "1S/7C/1T"
> +        self.core_config = "1S/8C/1T"
>          self.build_eventdev_app()
>
>          self.dut_ports = self.dut.get_ports()
> @@ -59,18 +60,17 @@ class TestEventdevPipeline(TestCase):
>          self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
>          self.core_list = self.dut.get_core_list(
>              self.core_config, socket=self.ports_socket)
> -        self.core_list_rx = self.core_list[0:1]
> -        self.core_list_tx = self.core_list[1:2]
> -        self.core_list_sd = self.core_list[2:3]
> -        self.core_list_wk = self.core_list[3:7]
> +        self.verify(len(self.core_list) >= 8, 'server does not have enough cores to run this suite')
> +        self.core_list_rx = self.core_list[1:2]
> +        self.core_list_tx = self.core_list[2:3]
> +        self.core_list_sd = self.core_list[3:4]
> +        self.core_list_wk = self.core_list[4:8]
>          self.core_mask_rx = utils.create_mask(self.core_list_rx)
>          self.core_mask_tx = utils.create_mask(self.core_list_tx)
>          self.core_mask_sd = utils.create_mask(self.core_list_sd)
>          self.core_mask_wk = utils.create_mask(self.core_list_wk)
>
> -        self.core_list = ",".join(self.core_list)
> -        pre = int(self.core_list[0]) - 1
> -        self.core_list = str(pre) + "," + self.core_list
> +        self.taskset_core_list = ",".join(self.core_list)
>
>          self.rx_port = self.tester.get_local_port(self.dut_ports[0])
>          self.tx_port = self.rx_port
> @@ -82,7 +82,7 @@ class TestEventdevPipeline(TestCase):
>          """
>          Run before each test case.
>          """
> -        self.dut.send_expect("killall -s INT eventdev_pipeline", "#")
> +        pass
>
>      def build_eventdev_app(self):
>          self.app_command = "examples/eventdev_pipeline"
> @@ -95,12 +95,14 @@ class TestEventdevPipeline(TestCase):
>          """
>          run eventdev_pipeline command
>          """
> +        eal_params = self.dut.create_eal_parameters(cores=self.core_list,
> +                                                    ports=[self.dut.ports_info[0]['pci']])
>          command_line = "taskset -c %s " + self.app_command + \
> -                       "/build/app/eventdev_pipeline " + \
> +                       "/build/app/eventdev_pipeline %s " + \
>                         "--vdev event_sw0 -- -r%s -t%s -e%s -w %s -s1 -n0 -c32 -W1000 %s -D"
>          command_line = command_line % (
> -            self.core_list, self.core_mask_rx, self.core_mask_tx,
> -            self.core_mask_sd, self.core_mask_wk, cmd_type)
> +            self.taskset_core_list, eal_params, self.core_mask_rx,
> +            self.core_mask_tx, self.core_mask_sd,
> +            self.core_mask_wk, cmd_type)
>          self.dut.send_expect(command_line, "Port 0", 30)
>
>          out = self.dut.get_session_output()
> @@ -109,8 +111,8 @@ class TestEventdevPipeline(TestCase):
>          self.verify("executing scheduler" in out, "lcore of scheduler not right")
>          self.verify("executing worker" in out, "lcore of worker not right")
>
> -    def remove_dhcp_from_revpackets(self, inst):
> -        pkts = self.tester.load_tcpdump_sniff_packets(inst)
> +    def remove_dhcp_from_revpackets(self, inst, timeout=3):
> +        pkts = self.tester.load_tcpdump_sniff_packets(inst, timeout)
>          i = 0
>          while len(pkts) != 0 and i <= len(pkts) - 1:
>              if pkts[i].haslayer('DHCP'):
> @@ -119,7 +121,7 @@ class TestEventdevPipeline(TestCase):
>                  i = i + 1
>          return pkts
>
> -    def send_ordered_packet(self):
> +    def send_ordered_packet(self, count=1):
>          """
>          send the packets ordered by src-ip info
>          the worker dequeue depth is 32, so a packet number that is a multiple of 32 is better
> @@ -129,37 +131,58 @@ class TestEventdevPipeline(TestCase):
>          if there are eight flows, the pcap has 8 groups with different 5-tuples, and each
>          group's payload counts from 000001 to 000012
>          """
> +        pkt = Packet()
>          for queue in range(self.queues):
> -            src_ip = "11.12.13.%d" % (queue+1)
> -            pay_load = "000001"
> -            flow_info = 'flow1 = [Ether(dst="%s",src="%s")/IP(src="%s")/UDP(sport=123, dport=12)/("%s")]'
> -            self.tester.scapy_append(flow_info % (self.d_mac, self.s_mac, src_ip, pay_load))
> -            for i in range(1, self.packet_num/self.queues):
> -                pay_load = "0000%.2d" % (i+1)
> -                self.tester.scapy_append('flow_temp = [Ether(dst="%s", src="%s")/IP(src="%s")/UDP(sport=123, dport=12)/("%s")]'
> -                                         % (self.d_mac, self.s_mac, src_ip, pay_load))
> -                if i == 1:
> -                    self.tester.scapy_append('flow2 = flow_temp')
> -                else:
> -                    self.tester.scapy_append('flow2 = flow2 + flow_temp')
> -            if queue == 0:
> -                self.tester.scapy_append('flow = flow1 + flow2')
> -            else:
> -                self.tester.scapy_append('flow = flow + flow1 + flow2')
> -
> -        self.tester.scapy_append('wrpcap("pipeline.pcap", flow)')
> -        self.tester.scapy_execute()
> -        time.sleep(5)
> +            config_opt = [('ether', {'dst': self.d_mac, 'src': self.s_mac}),
> +                          ('ipv4', {'src': '11.12.13.%d' % (queue+1), 'dst': '11.12.1.1'}),
> +                          ('udp', {'src': 123, 'dst': 12})]
> +            # if only one queue, create self.packet_num pkts with the same 5-tuple
> +            # if multi queue, create self.packet_num pkts with different 5-tuples,
> +            # each tuple has (self.packet_num//self.queues) pkts
> +            pkt_num = self.packet_num//self.queues
> +            pkt.generate_random_pkts(pktnum=pkt_num, random_type=['UDP'],
> +                                     ip_increase=False, random_payload=False,
> +                                     options={'layers_config': config_opt})
> +            # config raw info in pkts
> +            for i in range(pkt_num):
> +                payload = "0000%.2d" % (i+1)
> +                pkt.pktgen.pkts[i + pkt_num*queue]['Raw'].load = payload
>
>          filt = [{'layer': 'ether', 'config': {'src': '%s' % self.s_mac}}]
>          inst = self.tester.tcpdump_sniff_packets(self.rx_interface, filters=filt)
> -        self.tester.scapy_append('pkt=rdpcap("pipeline.pcap")')
> -        self.tester.scapy_append('sendp(pkt, iface="%s")' % self.tx_interface)
> -        self.tester.scapy_execute()
> -        time.sleep(5)
> +        pkt.send_pkt(crb=self.tester, tx_port=self.tx_interface,
> +                     count=count, timeout=300)
>          self.pkts = self.remove_dhcp_from_revpackets(inst)
>
> -    def check_packet_order(self):
> +    def check_load_balance_behavior(self, case_info):
> +        """
> +        check the load-balance behavior by the workload of every worker
> +        96*100 pkts are sent in total, so the number of pkts received by each worker
> +        should be smaller than 2760 and greater than 2040
> +        """
> +        self.send_ordered_packet(count=100)
> +        # exit the eventdev_pipeline app
> +        # and get the output info
> +        self.dut.send_expect('^c', 'Signal')
> +        out = self.dut.get_session_output(timeout=3)
> +        work_rx = []
> +        for wk in self.core_list_wk:
> +            one_info = re.search('worker\s*%s\s*thread done.\s*RX=(\d*)\s*TX=(\d*)' % str(wk), out)
> +            self.verify(one_info is not None and len(one_info.groups()) == 2
> +                        and int(one_info.group(1)) > 0,
> +                        "%s: cannot get the worker rx and tx packets info from output" % case_info)
> +            work_info = {'work': int(wk), 'rx': int(one_info.group(1)), 'tx': int(one_info.group(2))}
> +            work_rx.append(work_info)
> +        # get all received pkts
> +        all_rx = 0
> +        for wk in work_rx:
> +            all_rx += wk['rx']
> +        ave_rx = all_rx//len(work_rx)
> +        for wk in work_rx:
> +            self.verify(wk['rx'] <= ave_rx + ave_rx*0.15 and wk['rx'] >= ave_rx - ave_rx*0.15,
> +                '%s: the worker thread rx is not balanced, all_rx: %d, worker %d rx is %d' % (
> +                case_info, all_rx, wk['work'], wk['rx']))
> +            self.logger.info('%s: worker thread %d received %d pkts' %
> +                             (case_info, wk['work'], wk['rx']))
> +
> +    def check_packet_order(self, case_info):
>          """
>          observe the packets sent by scapy, check the packets order
>          """
> @@ -176,7 +199,7 @@ class TestEventdevPipeline(TestCase):
>                      packet_index = int(self.pkts[i]['Raw'].load[-2:])
>                      pay_load = "0000%.2d" % (packet_index)
>                  self.verify(self.pkts[i]['Raw'].load == pay_load,
> -                            "The packets are not ordered")
> +                            "%s: The packets are not ordered" % case_info)
>                  packet_index = packet_index + 1
>
>      def test_keep_packet_order_with_ordered_stage(self):
> @@ -185,32 +208,83 @@ class TestEventdevPipeline(TestCase):
>          according to the tcpdump, packets which do not belong to the current
>          flow may be captured, so set a different src_mac for each flow to identify the packets
>          """
> +        self.logger.info('check keep packet order about single-flow')
>          self.lanuch_eventdev_pipeline("-o")
>          self.queues = 1
>          self.s_mac = "00:00:00:00:00:00"
> -        self.check_packet_order()
> +        self.check_packet_order('single-flow')
> +        self.logger.info('check keep packet order about multi-flow')
>          self.s_mac = "00:00:00:00:00:01"
>          self.queues = 8
> -        self.check_packet_order()
> +        self.check_packet_order('multi-flow')
>
>      def test_keep_packet_order_with_default_stage(self):
>          """
>          keep the packets order with atomic stage in single-flow and multi-flow
>          """
> +        self.logger.info('check keep packet order about single-flow')
>          self.lanuch_eventdev_pipeline(" ")
>          self.queues = 1
>          self.s_mac = "00:00:00:00:00:02"
> -        self.check_packet_order()
> +        self.check_packet_order('single-flow')
> +        self.logger.info('check keep packet order about multi-flow')
>          self.s_mac = "00:00:00:00:00:03"
>          self.queues = 8
> -        self.check_packet_order()
> +        self.check_packet_order('multi-flow')
> +
> +    def test_check_load_balance_behavior_with_default_type(self):
> +        """
> +        Check load-balance behavior with default type in single-flow and multi-flow situations
> +        """
> +        self.logger.info('check load balance about single-flow')
> +        self.lanuch_eventdev_pipeline(" ")
> +        self.queues = 1
> +        self.s_mac = "00:00:00:00:00:04"
> +        self.check_load_balance_behavior('single-flow')
> +
> +        self.logger.info('check load balance about multi-flow')
> +        self.lanuch_eventdev_pipeline(" ")
> +        self.queues = 8
> +        self.s_mac = "00:00:00:00:00:05"
> +        self.check_load_balance_behavior('multi-flow')
> +
> +    def test_check_load_balance_behavior_with_order_type(self):
> +        """
> +        Check load-balance behavior with order type stage in single-flow and multi-flow situations
> +        """
> +        self.logger.info('check load balance about single-flow')
> +        self.lanuch_eventdev_pipeline("-o")
> +        self.queues = 1
> +        self.s_mac = "00:00:00:00:00:06"
> +        self.check_load_balance_behavior('single-flow')
> +
> +        self.logger.info('check load balance about multi-flow')
> +        self.lanuch_eventdev_pipeline("-o")
> +        self.queues = 8
> +        self.s_mac = "00:00:00:00:00:07"
> +        self.check_load_balance_behavior('multi-flow')
> +
> +    def test_check_load_balance_behavior_with_parallel_type(self):
> +        """
> +        Check load-balance behavior with parallel type stage in single-flow and multi-flow situations
> +        """
> +        self.logger.info('check load balance about single-flow')
> +        self.lanuch_eventdev_pipeline("-p")
> +        self.queues = 1
> +        self.s_mac = "00:00:00:00:00:08"
> +        self.check_load_balance_behavior('single-flow')
> +
> +        self.logger.info('check load balance about multi-flow')
> +        self.lanuch_eventdev_pipeline("-p")
> +        self.queues = 8
> +        self.s_mac = "00:00:00:00:00:09"
> +        self.check_load_balance_behavior('multi-flow')
>
>      def tear_down(self):
>          """
>          Run after each test case.
>          """
> -        self.dut.send_expect("^c", "#", 10)
> -        self.dut.send_expect("killall -s INT eventdev_pipeline", "#")
> +        self.dut.kill_all()
>          time.sleep(5)
>
>      def tear_down_all(self):
> --
> 2.7.4
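
A note for readers adapting this suite: the order check rests on the payload numbering
that send_ordered_packet sets up, where the packets of one flow carry payloads
000001, 000002, ... and must be captured in that order. A minimal standalone sketch of
that idea follows; verify_order is a hypothetical helper for illustration, not a DTS
API, and the payload layout is assumed from the patch above.

def verify_order(payloads):
    # payloads are '0000NN' strings captured from one flow;
    # NN must increase by exactly 1 from packet to packet
    expect = int(payloads[0][-2:])
    for load in payloads:
        index = int(load[-2:])
        assert index == expect, 'out of order: got %s, expected 0000%.2d' % (load, expect)
        expect += 1

# packets of a single flow, listed in capture order
verify_order(['000001', '000002', '000003', '000004'])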
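
The load-balance check itself reduces to two steps: parse the per-worker
"worker N thread done. RX=... TX=..." counters that eventdev_pipeline prints on exit,
then require every worker's RX count to fall within +/-15% of the per-worker average
(for 96*100 pkts over 4 workers that is the band [2040, 2760] quoted in the docstring).
A standalone sketch under those assumptions; parse_worker_stats and check_balance are
illustrative names, not DTS APIs, and sample_out is synthetic.

import re

def parse_worker_stats(output, worker_cores):
    # pull the per-worker RX/TX counters out of the app's exit output
    stats = []
    for core in worker_cores:
        m = re.search(r'worker\s*%s\s*thread done\.\s*RX=(\d+)\s*TX=(\d+)' % core, output)
        assert m is not None, 'no RX/TX info for worker %s' % core
        stats.append({'work': int(core), 'rx': int(m.group(1)), 'tx': int(m.group(2))})
    return stats

def check_balance(stats, tolerance=0.15):
    # every worker's RX count must sit within +/- tolerance of the average
    ave_rx = sum(s['rx'] for s in stats) // len(stats)
    for s in stats:
        assert abs(s['rx'] - ave_rx) <= ave_rx * tolerance, \
            'worker %d rx %d is outside %d +/- 15%%' % (s['work'], s['rx'], ave_rx)

# synthetic output: 9600 pkts over 4 workers -> average 2400, band [2040, 2760]
sample_out = ('worker 4 thread done. RX=2300 TX=2300\n'
              'worker 5 thread done. RX=2450 TX=2450\n'
              'worker 6 thread done. RX=2500 TX=2500\n'
              'worker 7 thread done. RX=2350 TX=2350\n')
check_balance(parse_worker_stats(sample_out, ['4', '5', '6', '7']))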