From mboxrd@z Thu Jan 1 00:00:00 1970
From: Thanseerulhaq <thaq@marvell.com>
To: dts@dpdk.org
Date: Fri, 31 May 2019 13:01:30 +0530
Message-ID: <1559287890-27187-1-git-send-email-thaq@marvell.com>
X-Mailer: git-send-email 1.8.3.1
MIME-Version: 1.0
Content-Type: text/plain
Subject: [dts] [PATCH] TestSuite_eventdev_pipeline_perf.py: add eventdev_pipeline performance test script

From: Thanseerulhaq <thaq@marvell.com>

Add performance test cases for 1/2/4 NIC ports covering the eventdev_pipeline atomic, parallel, and ordered schedule types. Add the corresponding perf_eventdev_pipeline_* test case entries for Marvell (cavium_a063) cards to conf/test_case_supportlist.json.
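For reference, the suite drives the dpdk-eventdev_pipeline sample application; the command lines it assembles have roughly the following shape (the core masks and the NIC PCI address below are illustrative only, while the eventdev device address comes from the suite configuration):

    dpdk-eventdev_pipeline -c 0xff00 -w 0002:0e:00.0 -w 0002:02:00.0 -- -w 0xfe00 -n=0 --dump  -m 16384

An empty schedule-type flag (as above) is used for the atomic test cases, while "-p" and "-o" are passed for the parallel and ordered test cases respectively.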
Signed-off-by: Thanseerulhaq --- conf/test_case_supportlist.json | 135 +++++ tests/TestSuite_eventdev_pipeline_perf.py | 839 ++++++++++++++++++++++++++++++ 2 files changed, 974 insertions(+) create mode 100644 tests/TestSuite_eventdev_pipeline_perf.py diff --git a/conf/test_case_supportlist.json b/conf/test_case_supportlist.json index 1886cb0..7db258e 100644 --- a/conf/test_case_supportlist.json +++ b/conf/test_case_supportlist.json @@ -1223,5 +1223,140 @@ "Bug ID": "", "Comments": "This case currently support for cavium_a063 " } + ], + "perf_eventdev_pipeline_1ports_atomic_performance": [ + { + "OS": [ + "ALL" + ], + "NIC": [ + "cavium_a063" + ], + "Target": [ + "ALL" + ], + "Bug ID": "", + "Comments": "This case currently support for cavium_a063 " + } + ], + "perf_eventdev_pipeline_1ports_parallel_performance": [ + { + "OS": [ + "ALL" + ], + "NIC": [ + "cavium_a063" + ], + "Target": [ + "ALL" + ], + "Bug ID": "", + "Comments": "This case currently support for cavium_a063 " + } + ], + "perf_eventdev_pipeline_1ports_order_performance": [ + { + "OS": [ + "ALL" + ], + "NIC": [ + "cavium_a063" + ], + "Target": [ + "ALL" + ], + "Bug ID": "", + "Comments": "This case currently support for cavium_a063 " + } + ], + "perf_eventdev_pipeline_2ports_atomic_performance": [ + { + "OS": [ + "ALL" + ], + "NIC": [ + "cavium_a063" + ], + "Target": [ + "ALL" + ], + "Bug ID": "", + "Comments": "This case currently support for cavium_a063 " + } + ], + "perf_eventdev_pipeline_2ports_parallel_performance": [ + { + "OS": [ + "ALL" + ], + "NIC": [ + "cavium_a063" + ], + "Target": [ + "ALL" + ], + "Bug ID": "", + "Comments": "This case currently support for cavium_a063 " + } + ], + "perf_eventdev_pipeline_2ports_order_performance": [ + { + "OS": [ + "ALL" + ], + "NIC": [ + "cavium_a063" + ], + "Target": [ + "ALL" + ], + "Bug ID": "", + "Comments": "This case currently support for cavium_a063 " + } + ], + "perf_eventdev_pipeline_4ports_atomic_performance": [ + { + "OS": [ + "ALL" + ], + "NIC": [ + "cavium_a063" + ], + "Target": [ + "ALL" + ], + "Bug ID": "", + "Comments": "This case currently support for cavium_a063 " + } + ], + "perf_eventdev_pipeline_4ports_parallel_performance": [ + { + "OS": [ + "ALL" + ], + "NIC": [ + "cavium_a063" + ], + "Target": [ + "ALL" + ], + "Bug ID": "", + "Comments": "This case currently support for cavium_a063 " + } + ], + "perf_eventdev_pipeline_4ports_order_performance": [ + { + "OS": [ + "ALL" + ], + "NIC": [ + "cavium_a063" + ], + "Target": [ + "ALL" + ], + "Bug ID": "", + "Comments": "This case currently support for cavium_a063 " + } ] } diff --git a/tests/TestSuite_eventdev_pipeline_perf.py b/tests/TestSuite_eventdev_pipeline_perf.py new file mode 100644 index 0000000..ac99eb1 --- /dev/null +++ b/tests/TestSuite_eventdev_pipeline_perf.py @@ -0,0 +1,839 @@ +# BSD LICENSE +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (C) 2019 Marvell International Ltd. + +""" +DPDK Test suite. +Test userland 10Gb/25Gb/40Gb/100Gb +""" + +import utils +import re +import time +import os + +from test_case import TestCase +from time import sleep +from settings import HEADER_SIZE +from pmd_output import PmdOutput +from etgen import IxiaPacketGenerator + +from settings import FOLDERS +from system_info import SystemInfo +import perf_report +from datetime import datetime + +class TestEventdevPipelinePerf(TestCase,IxiaPacketGenerator): + + def set_up_all(self): + """ + Run at the start of each test suite. + + PMD prerequisites. 
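+        Note: each entry in self.test_cycles names a core configuration;
+        when a test case runs, the first core of that list is excluded from
+        the worker core mask (presumably left for the sample application's
+        non-worker threads), so the 2C/3C/5C/9C/17C configurations are
+        reported in the result table as 1/2/4/8/16 worker cores.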
+ """ + self.tester.extend_external_packet_generator(TestEventdevPipelinePerf, self) + + self.frame_sizes = [64, 128, 256, 512, 1024, 1518] + + self.rxfreet_values = [0, 8, 16, 32, 64, 128] + + self.test_cycles = [ + {'cores': '1S/2C/1T', 'Mpps': {}, 'pct': {}}, + {'cores': '1S/3C/1T', 'Mpps': {}, 'pct': {}}, + {'cores': '1S/5C/1T', 'Mpps': {}, 'pct': {}}, + {'cores': '1S/9C/1T', 'Mpps': {}, 'pct': {}}, + {'cores': '1S/17C/1T', 'Mpps': {}, 'pct': {}}, + ] + self.get_cores_from_last = True + self.table_header = ['Frame Size'] + for test_cycle in self.test_cycles: + m = re.search(r"(\d+S/)(\d+)(C/\d+T)",test_cycle['cores']) + cores = m.group(1) + str(int(m.group(2))-1) + m.group(3) + self.table_header.append("%s Mpps" % cores) + self.table_header.append("% linerate") + + self.perf_results = {'header': [], 'data': []} + + self.blacklist = "" + + # Based on h/w type, choose how many ports to use + self.dut_ports = self.dut.get_ports() + if self.dut.get_os_type() == 'linux': + # Get dut system information + port_num = self.dut_ports[0] + pci_device_id = self.dut.ports_info[port_num]['pci'] + ori_driver = self.dut.ports_info[port_num]['port'].get_nic_driver() + self.dut.ports_info[port_num]['port'].bind_driver() + + self.dut.ports_info[port_num]['port'].bind_driver(ori_driver) + + if self.nic == "cavium_a063": + self.eventdev_device_bus_id = "0002:0e:00.0" + self.eventdev_device_id = "a0f9" + + #### Bind evendev device #### + self.dut.bind_eventdev_port(port_to_bind=self.eventdev_device_bus_id) + + #### Configuring evendev SS0 & SSOw limits #### + self.dut.set_eventdev_port_limits(self.eventdev_device_id, self.eventdev_device_bus_id) + + self.headers_size = HEADER_SIZE['eth'] + HEADER_SIZE[ + 'ip'] + HEADER_SIZE['tcp'] + + self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + + self.pmdout = PmdOutput(self.dut) + + def set_up(self): + """ + Run before each test case. + """ + pass + + def eventdev_cmd(self, stlist, nports, wmask): + + self.Port_pci_ids = [] + command_line1 = "dpdk-eventdev_pipeline -c %s -w %s" + for i in range(0, nports): + self.Port_pci_ids.append(self.dut.ports_info[i]['pci']) + ## Adding core-list and pci-ids + command_line1 = command_line1 + " -w %s " + ## Adding test and stage types + command_line2 = "-- -w %s -n=0 --dump %s -m 16384" % (wmask , stlist ) + return command_line1 + command_line2 + + def test_perf_eventdev_pipeline_1ports_atomic_performance(self): + """ + Evendev_Pipeline Performance Benchmarking with 1 ports. 
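+        For every frame size the generator result is recorded both as Mpps
+        (pps / 1e6) and as a percentage of the single-port wirespeed; as an
+        illustration, 7.44 Mpps of 64-byte frames on a 10G port would be
+        reported as roughly 50% linerate.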
+ """ + self.verify(len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test") + self.perf_results['header'] = [] + self.perf_results['data'] = [] + + all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + + # prepare traffic generator input + tgen_input = [] + tgen_input.append((self.tester.get_local_port(self.dut_ports[0]), + self.tester.get_local_port(self.dut_ports[0]), + "event_test.pcap")) + + # run testpmd for each core config + for test_cycle in self.test_cycles: + core_config = test_cycle['cores'] + + core_list = self.dut.get_core_list(core_config, + socket=self.ports_socket, from_last = self.get_cores_from_last) + core_mask = utils.create_mask(core_list) + core_list.remove(core_list[0]) + worker_core_mask = utils.create_mask(core_list) + + command_line = self.eventdev_cmd("", 1, worker_core_mask) + command_line = command_line %(core_mask, self.eventdev_device_bus_id, self.Port_pci_ids[0]) + self.dut.send_expect(command_line,"eventdev port 0", 100) + + info = "Executing Eventdev_pipeline using %s\n" % test_cycle['cores'] + self.logger.info(info) + self.rst_report(info, annex=True) + self.rst_report(command_line + "\n\n", frame=True, annex=True) + + for frame_size in self.frame_sizes: + wirespeed = self.wirespeed(self.nic, frame_size, 1) + + # create pcap file + self.logger.info("Running with frame size %d " % frame_size) + payload_size = frame_size - self.headers_size + self.tester.scapy_append( + 'wrpcap("event_test.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (payload_size)) + self.tester.scapy_execute() + + # run traffic generator + _, pps = self.tester.traffic_generator_throughput(tgen_input, rate_percent=100, delay=60) + pps /= 1000000.0 + pct = pps * 100 / wirespeed + test_cycle['Mpps'][frame_size] = float('%.3f' % pps) + test_cycle['pct'][frame_size] = float('%.3f' % pct) + + self.dut.send_expect("^C", "# ", 5) + sleep(5) + + for n in range(len(self.test_cycles)): + for frame_size in self.frame_sizes: + self.verify(self.test_cycles[n]['Mpps'][ + frame_size] > 0, "No traffic detected") + + # Print results + self.result_table_create(self.table_header) + self.perf_results['header'] = self.table_header + for frame_size in self.frame_sizes: + table_row = [frame_size] + for test_cycle in self.test_cycles: + table_row.append(test_cycle['Mpps'][frame_size]) + table_row.append(test_cycle['pct'][frame_size]) + + self.result_table_add(table_row) + self.perf_results['data'].append(table_row) + + self.result_table_print() + + def test_perf_eventdev_pipeline_1ports_parallel_performance(self): + """ + Evendev_Pipeline Performance Benchmarking with 1 ports. 
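+        Identical to the atomic case except that "-p" is passed to
+        dpdk-eventdev_pipeline to select the parallel schedule type for the
+        worker stages.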
+ """ + self.verify(len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test") + self.perf_results['header'] = [] + self.perf_results['data'] = [] + + all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + + # prepare traffic generator input + tgen_input = [] + tgen_input.append((self.tester.get_local_port(self.dut_ports[0]), + self.tester.get_local_port(self.dut_ports[0]), + "event_test.pcap")) + + # run testpmd for each core config + for test_cycle in self.test_cycles: + core_config = test_cycle['cores'] + + core_list = self.dut.get_core_list(core_config, + socket=self.ports_socket, from_last = self.get_cores_from_last) + core_mask = utils.create_mask(core_list) + core_list.remove(core_list[0]) + worker_core_mask = utils.create_mask(core_list) + + command_line = self.eventdev_cmd("-p", 1, worker_core_mask) + command_line = command_line %(core_mask, self.eventdev_device_bus_id, self.Port_pci_ids[0]) + self.dut.send_expect(command_line,"eventdev port 0", 100) + + info = "Executing Eventdev_pipeline using %s\n" % test_cycle['cores'] + self.logger.info(info) + self.rst_report(info, annex=True) + self.rst_report(command_line + "\n\n", frame=True, annex=True) + + for frame_size in self.frame_sizes: + wirespeed = self.wirespeed(self.nic, frame_size, 1) + + # create pcap file + self.logger.info("Running with frame size %d " % frame_size) + payload_size = frame_size - self.headers_size + self.tester.scapy_append( + 'wrpcap("event_test.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (payload_size)) + self.tester.scapy_execute() + + # run traffic generator + _, pps = self.tester.traffic_generator_throughput(tgen_input, rate_percent=100, delay=60) + pps /= 1000000.0 + pct = pps * 100 / wirespeed + test_cycle['Mpps'][frame_size] = float('%.3f' % pps) + test_cycle['pct'][frame_size] = float('%.3f' % pct) + + self.dut.send_expect("^C", "# ", 5) + sleep(5) + + for n in range(len(self.test_cycles)): + for frame_size in self.frame_sizes: + self.verify(self.test_cycles[n]['Mpps'][ + frame_size] > 0, "No traffic detected") + + # Print results + self.result_table_create(self.table_header) + self.perf_results['header'] = self.table_header + for frame_size in self.frame_sizes: + table_row = [frame_size] + for test_cycle in self.test_cycles: + table_row.append(test_cycle['Mpps'][frame_size]) + table_row.append(test_cycle['pct'][frame_size]) + + self.result_table_add(table_row) + self.perf_results['data'].append(table_row) + + self.result_table_print() + + def test_perf_eventdev_pipeline_1ports_order_performance(self): + """ + Evendev_Pipeline Performance Benchmarking with 1 ports. 
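+        Same flow with "-o" selecting the ordered schedule type. The
+        generated pcap carries a single Ether/IP/TCP flow whose payload is
+        frame_size minus the header sizes taken from settings.HEADER_SIZE,
+        so the transmitted frame matches the nominal frame size.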
+ """ + self.verify(len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test") + self.perf_results['header'] = [] + self.perf_results['data'] = [] + + all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + + # prepare traffic generator input + tgen_input = [] + tgen_input.append((self.tester.get_local_port(self.dut_ports[0]), + self.tester.get_local_port(self.dut_ports[0]), + "event_test.pcap")) + + # run testpmd for each core config + for test_cycle in self.test_cycles: + core_config = test_cycle['cores'] + + core_list = self.dut.get_core_list(core_config, + socket=self.ports_socket, from_last = self.get_cores_from_last) + core_mask = utils.create_mask(core_list) + core_list.remove(core_list[0]) + worker_core_mask = utils.create_mask(core_list) + + command_line = self.eventdev_cmd("-o", 1, worker_core_mask) + command_line = command_line %(core_mask, self.eventdev_device_bus_id, self.Port_pci_ids[0]) + self.dut.send_expect(command_line,"eventdev port 0", 100) + + info = "Executing Eventdev_pipeline using %s\n" % test_cycle['cores'] + self.logger.info(info) + self.rst_report(info, annex=True) + self.rst_report(command_line + "\n\n", frame=True, annex=True) + + for frame_size in self.frame_sizes: + wirespeed = self.wirespeed(self.nic, frame_size, 1) + + # create pcap file + self.logger.info("Running with frame size %d " % frame_size) + payload_size = frame_size - self.headers_size + self.tester.scapy_append( + 'wrpcap("event_test.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (payload_size)) + self.tester.scapy_execute() + + # run traffic generator + _, pps = self.tester.traffic_generator_throughput(tgen_input, rate_percent=100, delay=60) + pps /= 1000000.0 + pct = pps * 100 / wirespeed + test_cycle['Mpps'][frame_size] = float('%.3f' % pps) + test_cycle['pct'][frame_size] = float('%.3f' % pct) + + self.dut.send_expect("^C", "# ", 5) + sleep(5) + + for n in range(len(self.test_cycles)): + for frame_size in self.frame_sizes: + self.verify(self.test_cycles[n]['Mpps'][ + frame_size] > 0, "No traffic detected") + + # Print results + self.result_table_create(self.table_header) + self.perf_results['header'] = self.table_header + for frame_size in self.frame_sizes: + table_row = [frame_size] + for test_cycle in self.test_cycles: + table_row.append(test_cycle['Mpps'][frame_size]) + table_row.append(test_cycle['pct'][frame_size]) + + self.result_table_add(table_row) + self.perf_results['data'].append(table_row) + + self.result_table_print() + + def test_perf_eventdev_pipeline_2ports_atomic_performance(self): + """ + Eventdev_Pipeline atomic schedule type Performance Benchmarking with 2 ports.
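+        Traffic is offered in both directions: one stream is sent from the
+        tester port attached to DUT port 0 and measured on the tester port
+        attached to DUT port 1, with a second stream running the opposite
+        way; the linerate reference is the aggregate wirespeed of two ports.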
+ """ + self.verify(len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test") + self.perf_results['header'] = [] + self.perf_results['data'] = [] + + all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + + # prepare traffic generator input + tgen_input = [] + tgen_input.append((self.tester.get_local_port(self.dut_ports[0]), + self.tester.get_local_port(self.dut_ports[1]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[1]), + self.tester.get_local_port(self.dut_ports[0]), + "event_test.pcap")) + + # run testpmd for each core config + for test_cycle in self.test_cycles: + core_config = test_cycle['cores'] + + core_list = self.dut.get_core_list(core_config, + socket=self.ports_socket, from_last = self.get_cores_from_last) + core_mask = utils.create_mask(core_list) + core_list.remove(core_list[0]) + worker_core_mask = utils.create_mask(core_list) + + command_line = self.eventdev_cmd("", 2, worker_core_mask ) + command_line = command_line %(core_mask, self.eventdev_device_bus_id, self.Port_pci_ids[0], self.Port_pci_ids[1]) + self.dut.send_expect(command_line,"eventdev port 0", 100) + + info = "Executing Eventdev_pipeline using %s\n" % test_cycle['cores'] + self.logger.info(info) + self.rst_report(info, annex=True) + self.rst_report(command_line + "\n\n", frame=True, annex=True) + + for frame_size in self.frame_sizes: + wirespeed = self.wirespeed(self.nic, frame_size, 2) + + # create pcap file + self.logger.info("Running with frame size %d " % frame_size) + payload_size = frame_size - self.headers_size + self.tester.scapy_append( + 'wrpcap("event_test.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (payload_size)) + self.tester.scapy_execute() + + # run traffic generator + _, pps = self.tester.traffic_generator_throughput(tgen_input, rate_percent=100, delay=60) + pps /= 1000000.0 + pct = pps * 100 / wirespeed + test_cycle['Mpps'][frame_size] = float('%.3f' % pps) + test_cycle['pct'][frame_size] = float('%.3f' % pct) + + self.dut.send_expect("^C", "# ", 5) + sleep(5) + + for n in range(len(self.test_cycles)): + for frame_size in self.frame_sizes: + self.verify(self.test_cycles[n]['Mpps'][ + frame_size] > 0, "No traffic detected") + + # Print results + self.result_table_create(self.table_header) + self.perf_results['header'] = self.table_header + for frame_size in self.frame_sizes: + table_row = [frame_size] + for test_cycle in self.test_cycles: + table_row.append(test_cycle['Mpps'][frame_size]) + table_row.append(test_cycle['pct'][frame_size]) + + self.result_table_add(table_row) + self.perf_results['data'].append(table_row) + + self.result_table_print() + + def test_perf_eventdev_pipeline_2ports_parallel_performance(self): + """ + Evendev_Pipeline parallel schedule type Performance Benchmarking with 2 ports. 
+ """ + self.verify(len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test") + self.perf_results['header'] = [] + self.perf_results['data'] = [] + + all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + + # prepare traffic generator input + tgen_input = [] + tgen_input.append((self.tester.get_local_port(self.dut_ports[0]), + self.tester.get_local_port(self.dut_ports[1]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[1]), + self.tester.get_local_port(self.dut_ports[0]), + "event_test.pcap")) + + # run testpmd for each core config + for test_cycle in self.test_cycles: + core_config = test_cycle['cores'] + + core_list = self.dut.get_core_list(core_config, + socket=self.ports_socket, from_last = self.get_cores_from_last) + core_mask = utils.create_mask(core_list) + core_list.remove(core_list[0]) + worker_core_mask = utils.create_mask(core_list) + + command_line = self.eventdev_cmd("-p", 2, worker_core_mask) + command_line = command_line %(core_mask, self.eventdev_device_bus_id, self.Port_pci_ids[0], self.Port_pci_ids[1]) + self.dut.send_expect(command_line,"eventdev port 0", 100) + + info = "Executing Eventdev_pipeline using %s\n" % test_cycle['cores'] + self.logger.info(info) + self.rst_report(info, annex=True) + self.rst_report(command_line + "\n\n", frame=True, annex=True) + + for frame_size in self.frame_sizes: + wirespeed = self.wirespeed(self.nic, frame_size, 2) + + # create pcap file + self.logger.info("Running with frame size %d " % frame_size) + payload_size = frame_size - self.headers_size + self.tester.scapy_append( + 'wrpcap("event_test.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (payload_size)) + self.tester.scapy_execute() + + # run traffic generator + _, pps = self.tester.traffic_generator_throughput(tgen_input, rate_percent=100, delay=60) + pps /= 1000000.0 + pct = pps * 100 / wirespeed + test_cycle['Mpps'][frame_size] = float('%.3f' % pps) + test_cycle['pct'][frame_size] = float('%.3f' % pct) + + self.dut.send_expect("^C", "# ", 5) + sleep(5) + + for n in range(len(self.test_cycles)): + for frame_size in self.frame_sizes: + self.verify(self.test_cycles[n]['Mpps'][ + frame_size] > 0, "No traffic detected") + + # Print results + self.result_table_create(self.table_header) + self.perf_results['header'] = self.table_header + for frame_size in self.frame_sizes: + table_row = [frame_size] + for test_cycle in self.test_cycles: + table_row.append(test_cycle['Mpps'][frame_size]) + table_row.append(test_cycle['pct'][frame_size]) + + self.result_table_add(table_row) + self.perf_results['data'].append(table_row) + + self.result_table_print() + + def test_perf_eventdev_pipeline_2ports_order_performance(self): + """ + Evendev_Pipeline Order schedule type Performance Benchmarking with 2 ports. 
+ """ + self.verify(len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test") + self.perf_results['header'] = [] + self.perf_results['data'] = [] + + all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + + # prepare traffic generator input + tgen_input = [] + tgen_input.append((self.tester.get_local_port(self.dut_ports[0]), + self.tester.get_local_port(self.dut_ports[1]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[1]), + self.tester.get_local_port(self.dut_ports[0]), + "event_test.pcap")) + + # run testpmd for each core config + for test_cycle in self.test_cycles: + core_config = test_cycle['cores'] + + core_list = self.dut.get_core_list(core_config, + socket=self.ports_socket, from_last = self.get_cores_from_last) + core_mask = utils.create_mask(core_list) + core_list.remove(core_list[0]) + worker_core_mask = utils.create_mask(core_list) + + command_line = self.eventdev_cmd("-o", 2, worker_core_mask) + command_line = command_line %(core_mask, self.eventdev_device_bus_id, self.Port_pci_ids[0], self.Port_pci_ids[1]) + self.dut.send_expect(command_line,"eventdev port 0", 100) + + info = "Executing Eventdev_pipeline using %s\n" % test_cycle['cores'] + self.logger.info(info) + self.rst_report(info, annex=True) + self.rst_report(command_line + "\n\n", frame=True, annex=True) + + for frame_size in self.frame_sizes: + wirespeed = self.wirespeed(self.nic, frame_size, 2) + + # create pcap file + self.logger.info("Running with frame size %d " % frame_size) + payload_size = frame_size - self.headers_size + self.tester.scapy_append( + 'wrpcap("event_test.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (payload_size)) + self.tester.scapy_execute() + + # run traffic generator + _, pps = self.tester.traffic_generator_throughput(tgen_input, rate_percent=100, delay=60) + pps /= 1000000.0 + pct = pps * 100 / wirespeed + test_cycle['Mpps'][frame_size] = float('%.3f' % pps) + test_cycle['pct'][frame_size] = float('%.3f' % pct) + + self.dut.send_expect("^C", "# ", 5) + sleep(5) + + for n in range(len(self.test_cycles)): + for frame_size in self.frame_sizes: + self.verify(self.test_cycles[n]['Mpps'][ + frame_size] > 0, "No traffic detected") + + # Print results + self.result_table_create(self.table_header) + self.perf_results['header'] = self.table_header + for frame_size in self.frame_sizes: + table_row = [frame_size] + for test_cycle in self.test_cycles: + table_row.append(test_cycle['Mpps'][frame_size]) + table_row.append(test_cycle['pct'][frame_size]) + + self.result_table_add(table_row) + self.perf_results['data'].append(table_row) + + self.result_table_print() + + def test_perf_eventdev_pipeline_4ports_atomic_performance(self): + """ + Evendev_Pipeline Performance Benchmarking with 4 ports. 
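+        Four streams are offered, pairing DUT ports 0<->1 and 2<->3 in both
+        directions; the linerate reference is the aggregate wirespeed of
+        four ports.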
+ """ + self.verify(len(self.dut_ports) >= 4, "Insufficient ports for 4 ports performance test") + self.perf_results['header'] = [] + self.perf_results['data'] = [] + + all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + + # prepare traffic generator input + tgen_input = [] + tgen_input.append((self.tester.get_local_port(self.dut_ports[0]), + self.tester.get_local_port(self.dut_ports[1]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[2]), + self.tester.get_local_port(self.dut_ports[3]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[1]), + self.tester.get_local_port(self.dut_ports[0]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[3]), + self.tester.get_local_port(self.dut_ports[2]), + "event_test.pcap")) + + # run testpmd for each core config + for test_cycle in self.test_cycles: + core_config = test_cycle['cores'] + + core_list = self.dut.get_core_list(core_config, + socket=self.ports_socket, from_last = self.get_cores_from_last) + core_mask = utils.create_mask(core_list) + core_list.remove(core_list[0]) + worker_core_mask = utils.create_mask(core_list) + + command_line = self.eventdev_cmd("", 4, worker_core_mask) + command_line = command_line %(core_mask, self.eventdev_device_bus_id, self.Port_pci_ids[0], self.Port_pci_ids[1], self.Port_pci_ids[2], self.Port_pci_ids[3]) + self.dut.send_expect(command_line,"eventdev port 0", 100) + + info = "Executing Eventdev_pipeline using %s\n" % test_cycle['cores'] + self.logger.info(info) + self.rst_report(info, annex=True) + self.rst_report(command_line + "\n\n", frame=True, annex=True) + + for frame_size in self.frame_sizes: + wirespeed = self.wirespeed(self.nic, frame_size, 4) + + # create pcap file + self.logger.info("Running with frame size %d " % frame_size) + payload_size = frame_size - self.headers_size + self.tester.scapy_append( + 'wrpcap("event_test.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (payload_size)) + self.tester.scapy_execute() + + # run traffic generator + _, pps = self.tester.traffic_generator_throughput(tgen_input, rate_percent=100, delay=60) + pps /= 1000000.0 + pct = pps * 100 / wirespeed + test_cycle['Mpps'][frame_size] = float('%.3f' % pps) + test_cycle['pct'][frame_size] = float('%.3f' % pct) + + self.dut.send_expect("^C", "# ", 5) + sleep(5) + + for n in range(len(self.test_cycles)): + for frame_size in self.frame_sizes: + self.verify(self.test_cycles[n]['Mpps'][ + frame_size] > 0, "No traffic detected") + + # Print results + self.result_table_create(self.table_header) + self.perf_results['header'] = self.table_header + for frame_size in self.frame_sizes: + table_row = [frame_size] + for test_cycle in self.test_cycles: + table_row.append(test_cycle['Mpps'][frame_size]) + table_row.append(test_cycle['pct'][frame_size]) + + self.result_table_add(table_row) + self.perf_results['data'].append(table_row) + + self.result_table_print() + + def test_perf_eventdev_pipeline_4ports_parallel_performance(self): + """ + Evendev_Pipeline parallel schedule type Performance Benchmarking with 4 ports. 
+ """ + self.verify(len(self.dut_ports) >= 4, "Insufficient ports for 4 ports performance test") + self.perf_results['header'] = [] + self.perf_results['data'] = [] + + all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + + # prepare traffic generator input + tgen_input = [] + tgen_input.append((self.tester.get_local_port(self.dut_ports[0]), + self.tester.get_local_port(self.dut_ports[1]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[2]), + self.tester.get_local_port(self.dut_ports[3]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[1]), + self.tester.get_local_port(self.dut_ports[0]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[3]), + self.tester.get_local_port(self.dut_ports[2]), + "event_test.pcap")) + + # run testpmd for each core config + for test_cycle in self.test_cycles: + core_config = test_cycle['cores'] + + core_list = self.dut.get_core_list(core_config, + socket=self.ports_socket, from_last = self.get_cores_from_last) + core_mask = utils.create_mask(core_list) + core_list.remove(core_list[0]) + worker_core_mask = utils.create_mask(core_list) + + command_line = self.eventdev_cmd("-p", 4, worker_core_mask) + command_line = command_line %(core_mask, self.eventdev_device_bus_id, self.Port_pci_ids[0], self.Port_pci_ids[1], self.Port_pci_ids[2], self.Port_pci_ids[3]) + self.dut.send_expect(command_line,"eventdev port 0", 100) + + info = "Executing Eventdev_pipeline using %s\n" % test_cycle['cores'] + self.logger.info(info) + self.rst_report(info, annex=True) + self.rst_report(command_line + "\n\n", frame=True, annex=True) + + for frame_size in self.frame_sizes: + wirespeed = self.wirespeed(self.nic, frame_size, 4) + + # create pcap file + self.logger.info("Running with frame size %d " % frame_size) + payload_size = frame_size - self.headers_size + self.tester.scapy_append( + 'wrpcap("event_test.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (payload_size)) + self.tester.scapy_execute() + + # run traffic generator + _, pps = self.tester.traffic_generator_throughput(tgen_input, rate_percent=100, delay=60) + pps /= 1000000.0 + pct = pps * 100 / wirespeed + test_cycle['Mpps'][frame_size] = float('%.3f' % pps) + test_cycle['pct'][frame_size] = float('%.3f' % pct) + + self.dut.send_expect("^C", "# ", 5) + sleep(5) + + for n in range(len(self.test_cycles)): + for frame_size in self.frame_sizes: + self.verify(self.test_cycles[n]['Mpps'][ + frame_size] > 0, "No traffic detected") + + # Print results + self.result_table_create(self.table_header) + self.perf_results['header'] = self.table_header + for frame_size in self.frame_sizes: + table_row = [frame_size] + for test_cycle in self.test_cycles: + table_row.append(test_cycle['Mpps'][frame_size]) + table_row.append(test_cycle['pct'][frame_size]) + + self.result_table_add(table_row) + self.perf_results['data'].append(table_row) + + self.result_table_print() + + def test_perf_eventdev_pipeline_4ports_order_performance(self): + """ + Evendev_Pipeline Order schedule type Performance Benchmarking with 4 ports. 
+ """ + self.verify(len(self.dut_ports) >= 4, "Insufficient ports for 4 ports performance test") + self.perf_results['header'] = [] + self.perf_results['data'] = [] + + all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + + # prepare traffic generator input + tgen_input = [] + tgen_input.append((self.tester.get_local_port(self.dut_ports[0]), + self.tester.get_local_port(self.dut_ports[1]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[2]), + self.tester.get_local_port(self.dut_ports[3]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[1]), + self.tester.get_local_port(self.dut_ports[0]), + "event_test.pcap")) + tgen_input.append((self.tester.get_local_port(self.dut_ports[3]), + self.tester.get_local_port(self.dut_ports[2]), + "event_test.pcap")) + + # run testpmd for each core config + for test_cycle in self.test_cycles: + core_config = test_cycle['cores'] + + core_list = self.dut.get_core_list(core_config, + socket=self.ports_socket, from_last = self.get_cores_from_last) + core_mask = utils.create_mask(core_list) + core_list.remove(core_list[0]) + worker_core_mask = utils.create_mask(core_list) + + command_line = self.eventdev_cmd("-o", 4, worker_core_mask) + command_line = command_line %(core_mask, self.eventdev_device_bus_id, self.Port_pci_ids[0], self.Port_pci_ids[1], self.Port_pci_ids[2], self.Port_pci_ids[3]) + self.dut.send_expect(command_line,"eventdev port 0", 100) + + info = "Executing Eventdev_pipeline using %s\n" % test_cycle['cores'] + self.logger.info(info) + self.rst_report(info, annex=True) + self.rst_report(command_line + "\n\n", frame=True, annex=True) + + for frame_size in self.frame_sizes: + wirespeed = self.wirespeed(self.nic, frame_size, 4) + + # create pcap file + self.logger.info("Running with frame size %d " % frame_size) + payload_size = frame_size - self.headers_size + self.tester.scapy_append( + 'wrpcap("event_test.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (payload_size)) + self.tester.scapy_execute() + + # run traffic generator + _, pps = self.tester.traffic_generator_throughput(tgen_input, rate_percent=100, delay=60) + pps /= 1000000.0 + pct = pps * 100 / wirespeed + test_cycle['Mpps'][frame_size] = float('%.3f' % pps) + test_cycle['pct'][frame_size] = float('%.3f' % pct) + + self.dut.send_expect("^C", "# ", 5) + sleep(5) + + for n in range(len(self.test_cycles)): + for frame_size in self.frame_sizes: + self.verify(self.test_cycles[n]['Mpps'][ + frame_size] > 0, "No traffic detected") + + # Print results + self.result_table_create(self.table_header) + self.perf_results['header'] = self.table_header + for frame_size in self.frame_sizes: + table_row = [frame_size] + for test_cycle in self.test_cycles: + table_row.append(test_cycle['Mpps'][frame_size]) + table_row.append(test_cycle['pct'][frame_size]) + + self.result_table_add(table_row) + self.perf_results['data'].append(table_row) + + self.result_table_print() + + def ip(self, port, frag, src, proto, tos, dst, chksum, len, options, version, flags, ihl, ttl, id): + self.add_tcl_cmd("protocol config -name ip") + self.add_tcl_cmd('ip config -sourceIpAddr "%s"' % src) + if self.nic != "cavium_a063": + self.add_tcl_cmd("ip config -sourceIpAddrMode ipRandom") + else: + self.add_tcl_cmd("ip config -sourceIpAddrMode ipIncrHost") + self.add_tcl_cmd("ip config -sourceIpAddrRepeatCount 100") + self.add_tcl_cmd('ip config -destIpAddr "%s"' % dst) + if self.nic == "cavium_a063": + 
self.add_tcl_cmd("ip config -destIpAddrMode ipIdle") + else: + self.add_tcl_cmd("ip config -destIpAddrMode ipIdle") + self.add_tcl_cmd("ip config -ttl %d" % ttl) + self.add_tcl_cmd("ip config -totalLength %d" % len) + self.add_tcl_cmd("ip config -fragment %d" % frag) + self.add_tcl_cmd("ip config -ipProtocol ipV4ProtocolReserved255") + self.add_tcl_cmd("ip config -identifier %d" % id) + self.add_tcl_cmd("stream config -framesize %d" % (len + 18)) + self.add_tcl_cmd("ip set %d %d %d" % (self.chasId, port['card'], port['port'])) + + + def tear_down(self): + """ + Run after each test case. + """ + self.dut.send_expect("^C", "# ", 5) + self.dut.unbind_eventdev_port(port_to_unbind=self.eventdev_device_bus_id) + + def tear_down_all(self): + """ + Run after each test suite. + """ + self.dut.kill_all() -- 1.8.3.1