test suite reviews and discussions
 help / color / mirror / Atom feed
* Re: [dts] [next] [PATCH V2 1/3] framework: support argument: --update-expected.
  2018-07-23 11:18 ` [dts] [next] [PATCH V2 1/3] framework: support argument: --update-expected Lijuan Tu
@ 2018-07-23  5:22   ` Liu, Yong
  0 siblings, 0 replies; 5+ messages in thread
From: Liu, Yong @ 2018-07-23  5:22 UTC (permalink / raw)
  To: Tu, Lijuan, dts; +Cc: Tu, Lijuan

Lijuan,
Some comments are inline.

Thanks,
Marvin

> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of Lijuan Tu
> Sent: Monday, July 23, 2018 7:19 PM
> To: dts@dpdk.org
> Cc: Tu, Lijuan <lijuan.tu@intel.com>
> Subject: [dts] [next] [PATCH V2 1/3] framework: support argument: --
> update-expected.
> 
> A DPDK Performance Test Lab has been established, and Specification
> requires
> DTS to support an --update-expected argument which will cause the
> script to update values based on the results of the current test run.
> 
> When "--update-expected" is added to the bash command and there is
> "update-expected = True" in the suite configuration file, all objects in
> the configuration file will be updated.
> 
> Take the single core performance test for example:
> If "./dts --update-expected" is used and "update-expected = True" is in
> conf/nic_single_core_perf.cfg, all objects will be updated in
> conf/nic_single_core_perf.cfg
> 
> Signed-off-by: Lijuan Tu <lijuan.tu@intel.com>
> ---
>  framework/config.py    | 11 +++++++++++
>  framework/dts.py       |  6 +++++-
>  framework/main.py      |  6 +++++-
>  framework/settings.py  |  1 +
>  framework/test_case.py | 24 ++++++++++++++++++++----
>  5 files changed, 42 insertions(+), 6 deletions(-)
> 
> diff --git a/framework/config.py b/framework/config.py
> index 628fc6d..0b112d8 100644
> --- a/framework/config.py
> +++ b/framework/config.py
> @@ -147,6 +147,17 @@ class SuiteConf(UserConf):
> 
>          return case_cfg
> 
> +    def update_case_config(self, case_name=""):
> +        """
> +        update section (case_name) of the configure file
> +        """
> +        update_suite_cfg_obj = UserConf(self.config_file)
> +        update_suite_cfg = update_suite_cfg_obj.load_section(case_name)
> +        for key in update_suite_cfg_obj.conf.options(case_name):
> +            update_suite_cfg_obj.conf.set(
> +                case_name, key, str(self.suite_cfg[key]))
> +        update_suite_cfg_obj.conf.write(open(self.config_file, 'w'))
> +
> 
>  class VirtConf(UserConf):
> 
> diff --git a/framework/dts.py b/framework/dts.py
> index 0b2240c..4435418 100644
> --- a/framework/dts.py
> +++ b/framework/dts.py
> @@ -439,7 +439,7 @@ def dts_run_suite(duts, tester, test_suites, target):
>  def run_all(config_file, pkgName, git, patch, skip_setup,
>              read_cache, project, suite_dir, test_cases,
>              base_dir, output_dir, verbose, virttype, debug,
> -            debugcase, re_run, commands):
> +            debugcase, re_run, commands, update_expected):
>      """
>      Main process of DTS, it will run all test suites in the config file.
>      """
> @@ -479,6 +479,10 @@ def run_all(config_file, pkgName, git, patch,
> skip_setup,
>      if debugcase is True:
>          settings.save_global_setting(settings.DEBUG_CASE_SETTING, 'yes')
> 
> +    # enable update-expected
> +    if update_expected is True:
> +        settings.save_global_setting(settings.UPDATE_EXPECTED, 'yes')
> +
>      # init log_handler handler
>      if verbose is True:
>          logger.set_verbose()
> diff --git a/framework/main.py b/framework/main.py
> index 0aa54fd..9d7ef31 100755
> --- a/framework/main.py
> +++ b/framework/main.py
> @@ -143,6 +143,10 @@ parser.add_argument('--commands',
>                      help='run command on tester or dut. The command
> format is ' +
>                      '[commands]:dut|tester:pre-init|post-
> init:check|ignore')
> 
> +parser.add_argument('--update-expected',
> +                    action='store_true',
> +                    help='update expected values based on test results')
> +
>  args = parser.parse_args()
> 
> 
> @@ -159,4 +163,4 @@ dts.run_all(args.config_file, args.snapshot, args.git,
>              args.patch, args.skip_setup, args.read_cache,
>              args.project, args.suite_dir, args.test_cases,
>              args.dir, args.output, args.verbose,args.virttype,
> -            args.debug, args.debugcase, args.re_run, args.commands)
> +            args.debug, args.debugcase, args.re_run, args.commands,
> args.update_expected)
> diff --git a/framework/settings.py b/framework/settings.py
> index 07c3ac6..2561ddb 100644
> --- a/framework/settings.py
> +++ b/framework/settings.py
> @@ -218,6 +218,7 @@ DPDK_RXMODE_SETTING = "DTS_DPDK_RXMODE"
>  DTS_ERROR_ENV = "DTS_RUNNING_ERROR"
>  DTS_CFG_FOLDER = "DTS_CFG_FOLDER"
>  DTS_PARALLEL_SETTING = "DTS_PARALLEL_ENABLE"
> +UPDATE_EXPECTED = "DTS_UPDATE_EXPECTED_ENABLE"
> 
> 
>  """
> diff --git a/framework/test_case.py b/framework/test_case.py
> index a84e2bb..228544c 100644
> --- a/framework/test_case.py
> +++ b/framework/test_case.py
> @@ -40,7 +40,9 @@ import time
> 
>  from exception import VerifyFailure, TimeoutException
>  from settings import DRIVERS, NICS, get_nic_name, load_global_setting
> -from settings import PERF_SETTING, FUNC_SETTING, DEBUG_SETTING,
> DEBUG_CASE_SETTING, HOST_DRIVER_SETTING
> +from settings import PERF_SETTING, FUNC_SETTING, DEBUG_SETTING
> +from settings import DEBUG_CASE_SETTING, HOST_DRIVER_SETTING
> +from settings import UPDATE_EXPECTED, SUITE_SECTION_NAME
>  from rst import RstReport
>  from test_result import ResultTable, Result
>  from logger import getLogger
> @@ -254,10 +256,7 @@ class TestCase(object):
>          self._rst_obj.write_title("Test Case: " + case_name)
> 
>          # load suite configuration file here for rerun command
> -        self._suite_conf = SuiteConf(self.suite_name)
> -        self._suite_cfg = self._suite_conf.suite_cfg
>          self._case_cfg = self._suite_conf.load_case_config(case_name)
> -        del(self._suite_conf)
> 

This is useful for reloading the case configuration. You can skip this if update-expected is enabled.

>          case_result = True
>          if self._check_inst is not None:
> @@ -315,6 +314,11 @@ class TestCase(object):
>              self._suite_result.test_case_failed(trace)
>              self.logger.error('Test Case %s Result ERROR: ' % (case_name)
> + trace)
>          finally:
> +            # update expected
> +            if load_global_setting(UPDATE_EXPECTED) == "yes" and \
> +                self.get_suite_cfg().has_key('update_expected') and \
> +                self.get_suite_cfg()['update_expected'] == True:
> +                self._suite_conf.update_case_config(SUITE_SECTION_NAME)
>              self.tear_down()
>              return case_result
> 
> @@ -375,6 +379,18 @@ class TestCase(object):
>          """
>          return self._suite_cfg
> 
> +    def update_suite_cfg(self, suite_cfg):
> +

Lijuan,
I can't check where this function was called, is it useful?

        """
> +        Update suite based configuration
> +        """
> +        self._suite_cfg = suite_cfg
> +
> +    def update_suite_cfg_ele(self, key, value):
> +        """
> +        update one element of suite configuration
> +        """
> +        self._suite_cfg[key]=value
> +
Same comment as the previous one; please add spaces around the equals sign.

>      def execute_tear_downall(self):
>          """
>          execute suite tear_down_all function
> --
> 1.8.3.1

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [dts] [next] [PATCH V2 0/3] support update-expected to meet Open Lab requirement
@ 2018-07-23 11:18 Lijuan Tu
  2018-07-23 11:18 ` [dts] [next] [PATCH V2 1/3] framework: support argument: --update-expected Lijuan Tu
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Lijuan Tu @ 2018-07-23 11:18 UTC (permalink / raw)
  To: dts; +Cc: Lijuan Tu

The Linux Foundation's DPDK Project has established a DPDK Performance
Test Lab. Its specification requires DTS to support an --update-expected
argument which will cause the script to update all expected values based
on the results of the current test run.

So far, single core performance test has been deployed, and more test cases
will be deployed in future.

Currently single core performance test script has many hardcode, remove
them and make it more appropriate to meet the DPDK Performance Test Lab
Specification.


Lijuan Tu (3):
  framework: support argument: --update-expected.
  tests/nic_single_core_perf: remove hardcode
  conf/nic_single_core_perf: redefine elements

 conf/nic_single_core_perf.cfg           |  37 +++-
 framework/config.py                     |  11 +
 framework/dts.py                        |   6 +-
 framework/main.py                       |   6 +-
 framework/settings.py                   |   1 +
 framework/test_case.py                  |  24 +-
 tests/TestSuite_nic_single_core_perf.py | 374 ++++++++++++++++----------------
 7 files changed, 262 insertions(+), 197 deletions(-)

-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [dts] [next] [PATCH V2 1/3] framework: support argument: --update-expected.
  2018-07-23 11:18 [dts] [next] [PATCH V2 0/3] support update-expected to meet Open Lab requirement Lijuan Tu
@ 2018-07-23 11:18 ` Lijuan Tu
  2018-07-23  5:22   ` Liu, Yong
  2018-07-23 11:18 ` [dts] [next] [PATCH V2 2/3] tests/nic_single_core_perf: remove hardcode Lijuan Tu
  2018-07-23 11:18 ` [dts] [next] [PATCH V2 3/3] conf/nic_single_core_perf: redefine elements Lijuan Tu
  2 siblings, 1 reply; 5+ messages in thread
From: Lijuan Tu @ 2018-07-23 11:18 UTC (permalink / raw)
  To: dts; +Cc: Lijuan Tu

A DPDK Performance Test Lab has been established, and Specification requires
DTS to support an --update-expected argument which will cause the
script to update values based on the results of the current test run.

When "--update-expected" is added to the bash command and there is
"update-expected = True" in the suite configuration file, all objects in
the configuration file will be updated.

Take the single core performance test for example:
If "./dts --update-expected" is used and "update-expected = True" is in
conf/nic_single_core_perf.cfg, all objects will be updated in
conf/nic_single_core_perf.cfg

Signed-off-by: Lijuan Tu <lijuan.tu@intel.com>
---
 framework/config.py    | 11 +++++++++++
 framework/dts.py       |  6 +++++-
 framework/main.py      |  6 +++++-
 framework/settings.py  |  1 +
 framework/test_case.py | 24 ++++++++++++++++++++----
 5 files changed, 42 insertions(+), 6 deletions(-)

diff --git a/framework/config.py b/framework/config.py
index 628fc6d..0b112d8 100644
--- a/framework/config.py
+++ b/framework/config.py
@@ -147,6 +147,17 @@ class SuiteConf(UserConf):
 
         return case_cfg
 
+    def update_case_config(self, case_name=""):
+        """
+        update section (case_name) of the configure file
+        """
+        update_suite_cfg_obj = UserConf(self.config_file)
+        update_suite_cfg = update_suite_cfg_obj.load_section(case_name)
+        for key in update_suite_cfg_obj.conf.options(case_name):
+            update_suite_cfg_obj.conf.set(
+                case_name, key, str(self.suite_cfg[key]))
+        update_suite_cfg_obj.conf.write(open(self.config_file, 'w'))
+
 
 class VirtConf(UserConf):
 
diff --git a/framework/dts.py b/framework/dts.py
index 0b2240c..4435418 100644
--- a/framework/dts.py
+++ b/framework/dts.py
@@ -439,7 +439,7 @@ def dts_run_suite(duts, tester, test_suites, target):
 def run_all(config_file, pkgName, git, patch, skip_setup,
             read_cache, project, suite_dir, test_cases,
             base_dir, output_dir, verbose, virttype, debug,
-            debugcase, re_run, commands):
+            debugcase, re_run, commands, update_expected):
     """
     Main process of DTS, it will run all test suites in the config file.
     """
@@ -479,6 +479,10 @@ def run_all(config_file, pkgName, git, patch, skip_setup,
     if debugcase is True:
         settings.save_global_setting(settings.DEBUG_CASE_SETTING, 'yes')
 
+    # enable update-expected
+    if update_expected is True:
+        settings.save_global_setting(settings.UPDATE_EXPECTED, 'yes')
+
     # init log_handler handler
     if verbose is True:
         logger.set_verbose()
diff --git a/framework/main.py b/framework/main.py
index 0aa54fd..9d7ef31 100755
--- a/framework/main.py
+++ b/framework/main.py
@@ -143,6 +143,10 @@ parser.add_argument('--commands',
                     help='run command on tester or dut. The command format is ' +
                     '[commands]:dut|tester:pre-init|post-init:check|ignore')
 
+parser.add_argument('--update-expected',
+                    action='store_true',
+                    help='update expected values based on test results')
+
 args = parser.parse_args()
 
 
@@ -159,4 +163,4 @@ dts.run_all(args.config_file, args.snapshot, args.git,
             args.patch, args.skip_setup, args.read_cache,
             args.project, args.suite_dir, args.test_cases,
             args.dir, args.output, args.verbose,args.virttype,
-            args.debug, args.debugcase, args.re_run, args.commands)
+            args.debug, args.debugcase, args.re_run, args.commands, args.update_expected)
diff --git a/framework/settings.py b/framework/settings.py
index 07c3ac6..2561ddb 100644
--- a/framework/settings.py
+++ b/framework/settings.py
@@ -218,6 +218,7 @@ DPDK_RXMODE_SETTING = "DTS_DPDK_RXMODE"
 DTS_ERROR_ENV = "DTS_RUNNING_ERROR"
 DTS_CFG_FOLDER = "DTS_CFG_FOLDER"
 DTS_PARALLEL_SETTING = "DTS_PARALLEL_ENABLE"
+UPDATE_EXPECTED = "DTS_UPDATE_EXPECTED_ENABLE"
 
 
 """
diff --git a/framework/test_case.py b/framework/test_case.py
index a84e2bb..228544c 100644
--- a/framework/test_case.py
+++ b/framework/test_case.py
@@ -40,7 +40,9 @@ import time
 
 from exception import VerifyFailure, TimeoutException
 from settings import DRIVERS, NICS, get_nic_name, load_global_setting
-from settings import PERF_SETTING, FUNC_SETTING, DEBUG_SETTING, DEBUG_CASE_SETTING, HOST_DRIVER_SETTING
+from settings import PERF_SETTING, FUNC_SETTING, DEBUG_SETTING
+from settings import DEBUG_CASE_SETTING, HOST_DRIVER_SETTING
+from settings import UPDATE_EXPECTED, SUITE_SECTION_NAME
 from rst import RstReport
 from test_result import ResultTable, Result
 from logger import getLogger
@@ -254,10 +256,7 @@ class TestCase(object):
         self._rst_obj.write_title("Test Case: " + case_name)
 
         # load suite configuration file here for rerun command
-        self._suite_conf = SuiteConf(self.suite_name)
-        self._suite_cfg = self._suite_conf.suite_cfg
         self._case_cfg = self._suite_conf.load_case_config(case_name)
-        del(self._suite_conf)
 
         case_result = True
         if self._check_inst is not None:
@@ -315,6 +314,11 @@ class TestCase(object):
             self._suite_result.test_case_failed(trace)
             self.logger.error('Test Case %s Result ERROR: ' % (case_name) + trace)
         finally:
+            # update expected
+            if load_global_setting(UPDATE_EXPECTED) == "yes" and \
+                self.get_suite_cfg().has_key('update_expected') and \
+                self.get_suite_cfg()['update_expected'] == True:
+                self._suite_conf.update_case_config(SUITE_SECTION_NAME)
             self.tear_down()
             return case_result
 
@@ -375,6 +379,18 @@ class TestCase(object):
         """
         return self._suite_cfg
 
+    def update_suite_cfg(self, suite_cfg):
+        """
+        Update suite based configuration
+        """
+        self._suite_cfg = suite_cfg
+
+    def update_suite_cfg_ele(self, key, value):
+        """
+        update one element of suite configuration
+        """
+        self._suite_cfg[key]=value
+
     def execute_tear_downall(self):
         """
         execute suite tear_down_all function
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [dts] [next] [PATCH V2 2/3] tests/nic_single_core_perf: remove hardcode
  2018-07-23 11:18 [dts] [next] [PATCH V2 0/3] support update-expected to meet Open Lab requirement Lijuan Tu
  2018-07-23 11:18 ` [dts] [next] [PATCH V2 1/3] framework: support argument: --update-expected Lijuan Tu
@ 2018-07-23 11:18 ` Lijuan Tu
  2018-07-23 11:18 ` [dts] [next] [PATCH V2 3/3] conf/nic_single_core_perf: redefine elements Lijuan Tu
  2 siblings, 0 replies; 5+ messages in thread
From: Lijuan Tu @ 2018-07-23 11:18 UTC (permalink / raw)
  To: dts; +Cc: Lijuan Tu

The current script has lots of hardcoded values, so remove them, and re-define
the configuration file to keep up with the script changes.

Also, store values in global variables so that the framework can update them
when the requirement is met.


Signed-off-by: Lijuan Tu <lijuan.tu@intel.com>
---
 tests/TestSuite_nic_single_core_perf.py | 374 ++++++++++++++++----------------
 1 file changed, 189 insertions(+), 185 deletions(-)

diff --git a/tests/TestSuite_nic_single_core_perf.py b/tests/TestSuite_nic_single_core_perf.py
index 0cb1394..e361ffc 100644
--- a/tests/TestSuite_nic_single_core_perf.py
+++ b/tests/TestSuite_nic_single_core_perf.py
@@ -39,12 +39,14 @@ import re
 import time
 from test_case import TestCase
 from time import sleep
-from settings import HEADER_SIZE
+from exception import VerifyFailure
+from settings import HEADER_SIZE, UPDATE_EXPECTED, load_global_setting
 from pmd_output import PmdOutput
 from copy import deepcopy
 from prettytable import PrettyTable
 import rst
 
+
 class TestNicSingleCorePerf(TestCase):
 
     def set_up_all(self):
@@ -52,260 +54,257 @@ class TestNicSingleCorePerf(TestCase):
         Run at the start of each test suite.
         PMD prerequisites.
         """
+        self.verify(self.nic in ['niantic', 'fortville_25g', 'fortville_spirit',
+                                 'ConnectX5_MT4121', 'ConnectX4_LX_MT4117'],
+                                 "Not required NIC ")
 
-        self.frame_sizes = [64]
         self.headers_size = HEADER_SIZE['eth'] + HEADER_SIZE['ip']
-        self.ixgbe_descriptors = [128, 512, 2048]
-        self.i40e_descriptors = [512, 2048]
-        self.cx5_descriptors = [128, 256, 512, 2048]
-        self.cx4lx25g_descriptors = [128, 256, 512, 2048]
-        self.cx4lx40g_descriptors = [128, 256, 512, 2048]
+
+        # test parameters include: frames size, descriptor numbers
+        self.test_parameters = self.get_suite_cfg()['test_parameters']
 
         # traffic duraion in second
-        self.trafficDuration = 60
+        self.test_duration = self.get_suite_cfg()['test_duration']
 
-        #load the expected throughput for required nic
-        self.expected_throughput_nnt = self.get_suite_cfg()["throughput_nnt"]
-        self.expected_throughput_fvl25g = self.get_suite_cfg()["throughput_fvl25g"]
-        self.expected_throughput_fvl40g = self.get_suite_cfg()["throughput_fvl40g"]
-        self.expected_throughput_cx5 = self.get_suite_cfg()["throughput_cx5"]
-        self.expected_throughput_cx4lx25g = self.get_suite_cfg()["throughput_cx4lx25g"]
-        self.expected_throughput_cx4lx40g = self.get_suite_cfg()["throughput_cx4lx40g"]
+        # load the expected throughput for required nic
+        if self.nic in ["ConnectX4_LX_MT4117"]:
+            nic_speed = self.dut.ports_info[0]['port'].get_nic_speed()
+            if nic_speed == "25000":
+                self.expected_throughput = self.get_suite_cfg(
+                )['expected_throughput'][self.nic]['25G']
+            else:
+                self.expected_throughput = self.get_suite_cfg(
+                )['expected_throughput'][self.nic]['40G']
+        else:
+            self.expected_throughput = self.get_suite_cfg()[
+                'expected_throughput'][self.nic]
+
+        # initilize throughput attribution
+        # {'$framesize':{"$nb_desc": 'throughput'}
+        self.throughput = {}
 
-        # The acdepted gap between expected throughput and actual throughput, 1 Mpps
-        self.gap = 1
+        # Accepted tolerance in Mpps
+        self.gap = self.get_suite_cfg()['accepted_tolerance']
 
         # header to print test result table
-        self.table_header = ['Frame Size', 'TXD/RXD', 'Throughput', 'Rate', 'Expected Throughput']
+        self.table_header = ['Frame Size', 'TXD/RXD', 'Throughput', 'Rate',
+                             'Expected Throughput', 'Throughput Difference']
+        self.test_result = {}
 
         # Update config file and rebuild to get best perf on FVL
         if self.nic in ["fortville_25g", "fortville_spirit"]:
-            self.dut.send_expect("sed -i -e 's/CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n/CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=y/' ./config/common_base", "#", 20)
-            self.dut.build_install_dpdk(self.target)
+            self.dut.send_expect(
+                "sed -i -e 's/CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n/CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=y/' ./config/common_base", "#", 20)
+            # self.dut.build_install_dpdk(self.target)
 
         # Based on h/w type, choose how many ports to use
         self.dut_ports = self.dut.get_ports()
-
         self.socket = self.dut.get_numa_id(self.dut_ports[0])
-
         self.pmdout = PmdOutput(self.dut)
 
-        self.test_result = {}
-
         # determine if to save test result as a separated file
-        self.save_result_flag =  True
+        self.save_result_flag = True
 
     def set_up(self):
         """
         Run before each test case.
         """
-        if self.nic == "niantic":
-            self.descriptors = self.ixgbe_descriptors
-        elif self.nic in ["fortville_25g", "fortville_spirit"]:
-            self.descriptors = self.i40e_descriptors
-        elif self.nic in ["ConnectX5_MT4121"]:
-            self.descriptors = self.cx5_descriptors
-        elif self.nic in ["ConnectX4_LX_MT4117"]:
-            nic_speed = self.dut.ports_info[0]['port'].get_nic_speed()
-            if nic_speed == "25000":
-                self.descriptors = self.cx4lx25g_descriptors
-            else:
-                self.descriptors = self.cx4lx40g_descriptors
-        else:
-            raise Exception("Not required NIC")
+        pass
 
     def test_nic_single_core_perf(self):
         """
         Run nic single core performance 
         """
-        self.verify(len(self.dut_ports) == 2 or len(self.dut_ports) == 4, "Require 2 or 4 ports to test")
-        self.verify(self.nic in ['niantic', 'fortville_25g', 'fortville_spirit',
-                'ConnectX5_MT4121', 'ConnectX4_LX_MT4117'], "Not required NIC ")
-        if len(self.dut_ports) == 2:
-            self.perf_test(2)   
-        elif len(self.dut_ports) == 4:
-            self.perf_test(4)
+        self.nb_ports = len(self.dut_ports)
+        self.verify(self.nb_ports == 2 or self.nb_ports == 4,
+                    "Require 2 or 4 ports to test")
+        self.perf_test(self.nb_ports)
+        self.handle_results()
+
+        # check the gap between expected throughput and actual throughput
+        try:
+            for frame_size in self.test_parameters.keys():
+                for nb_desc in self.test_parameters[frame_size]:
+                    cur_gap = (self.expected_throughput[frame_size][nb_desc] -
+                                self.throughput[frame_size][nb_desc])
+                    self.verify(cur_gap < self.gap,
+                                 "Beyond Gap, Possible regression")
+        except Exception as e:
+            self.logger.error(e)
+            self.handle_expected()
+            raise VerifyFailure(
+                "Possible regression, Check your configuration please")
+        else:
+            self.handle_expected()
+
+    def handle_expected(self):
+        """
+        Update expected numbers to configurate file: conf/$suite_name.cfg
+        """
+        if load_global_setting(UPDATE_EXPECTED) == "yes":
+            for frame_size in self.test_parameters.keys():
+                for nb_desc in self.test_parameters[frame_size]:
+                    self.expected_throughput[frame_size][nb_desc] = \
+                        self.throughput[frame_size][nb_desc]
 
     def perf_test(self, port_num):
         """
         Single core Performance Benchmarking test
         """
-        # traffic option
-        options = {
-             'rate' : '100%',
-             #'ip': {'action': 'inc', 'mask' : '255.255.255.0', 'step': '0.0.0.1'}
-            }
+        # ports whitelist
+        eal_para = ""
+        for i in range(self.nb_ports):
+            eal_para += " -w " + self.dut.ports_info[i]['pci']
 
-        header = self.table_header
-        if port_num == 2:
-            pci0 = self.dut.ports_info[0]['pci']
-            pci1 = self.dut.ports_info[1]['pci']
-            eal = "-w %s -w %s" % (pci0, pci1)
-        elif port_num == 4:
-            pci0 = self.dut.ports_info[0]['pci']
-            pci1 = self.dut.ports_info[1]['pci']
-            pci2 = self.dut.ports_info[2]['pci']
-            pci3 = self.dut.ports_info[3]['pci']
-            eal = "-w %s -w %s -w %s -w %s" % (pci0, pci1, pci2, pci3)
-
-        # run testpmd with 2 cores
+        # run testpmd with 2 cores, one for interaction ,and one for forwarding
         core_config = "1S/2C/1T"
-        core_list = self.dut.get_core_list(core_config, socket=self.socket)
+        core_list = self.dut.get_core_list(core_config, socket = self.socket)
+        self.logger.info("Executing Test Using cores: %s" % core_list)
         port_mask = utils.create_mask(self.dut_ports)
 
-        for frame_size in self.frame_sizes:
-            ret_datas = {}
-            for descriptor in self.descriptors:
-                self.logger.info("Executing Test Using cores: %s" % core_list)
-                if self.nic in ["fortville_25g", "fortville_spirit"]:
-                    self.pmdout.start_testpmd(core_config, "--portmask=%s --txd=%d --rxd=%d --rxq=2 --txq=2" % (port_mask, descriptor, descriptor),eal, socket=self.socket)
-                else:
-                    self.pmdout.start_testpmd(core_config, "--portmask=%s --txd=%d --rxd=%d" % (port_mask, descriptor, descriptor),eal, socket=self.socket)
+        # parameters for application/testpmd
+        param = " --portmask=%s" % (port_mask)
+        # fortville has to use 2 queues at least to get the best performance
+        if self.nic in ["fortville_25g", "fortville_spirit"]:
+            param += " --rxq=2 --txq=2"
+
+        for frame_size in self.test_parameters.keys():
+            self.throughput[frame_size] = dict()
+            for nb_desc in self.test_parameters[frame_size]:
+                self.logger.info("Test running at parameters: " +
+                    "framesize: {}, rxd/txd: {}".format(frame_size, nb_desc))
+                parameter = param + " --txd=%d --rxd=%d" % (nb_desc, nb_desc)
+                self.pmdout.start_testpmd(
+                    core_config, parameter, eal_para, socket = self.socket)
                 self.dut.send_expect("start", "testpmd> ", 15)
 
-                self.logger.info("Running with frame size %d " % frame_size)
-
-                # create pcap file
-                payload_size = frame_size - self.headers_size
-                self.tester.scapy_append(
-                        'wrpcap("test.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/("X"*%d)])' % payload_size)
-                self.tester.scapy_execute()
-                self.tester.scapy_append(
-                        'wrpcap("test1.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="2.2.3.4",dst="1.1.1.1")/("X"*%d)])' % payload_size)
-                self.tester.scapy_execute()
-
-                # send the traffic
-                streams = self.prepare_stream(port_num, options)      
-                _, packets_received = self.tester.pktgen.measure_throughput(stream_ids=streams, delay=self.trafficDuration)
-
+                # measure throughput
+                stream_ids = self.prepare_stream(frame_size)
+                _, packets_received = self.tester.pktgen.measure_throughput(
+                    stream_ids = stream_ids, delay = self.test_duration)
                 throughput = packets_received / 1000000.0
+                self.throughput[frame_size][nb_desc] = throughput
 
                 self.dut.send_expect("stop", "testpmd> ")
                 self.dut.send_expect("quit", "# ", 30)
 
-                self.logger.info("Throughput result for Descriptor :%s is :%s Mpps" % (descriptor, throughput))
+                self.verify(throughput,
+                    "No traffic detected, please check your configuration")
+                self.logger.info("Trouthput of " +
+                    "framesize: {}, rxd/txd: {} is :{} Mpps".format(
+                        frame_size, nb_desc, throughput))
+
+        return self.throughput
 
-                wirespeed = self.wirespeed(self.nic, frame_size, port_num)
+    def handle_results(self):
+        """
+        results handled process:
+        1, save to self.test_results
+        2, create test results table
+        3, save to json file for Open Lab
+        """
 
-                # one entry for test result record
+        # save test results to self.test_result
+        header = self.table_header
+        for frame_size in self.test_parameters.keys():
+            wirespeed = self.wirespeed(self.nic, frame_size, self.nb_ports)
+            ret_datas = {}
+            for nb_desc in self.test_parameters[frame_size]:
                 ret_data = {}
                 ret_data[header[0]] = frame_size
-                ret_data[header[1]] = descriptor
-                ret_data[header[2]] = str(float("%.3f" % throughput)) + " Mpps"
-                ret_data[header[3]] = str(float("%.3f" % (throughput * 100 / wirespeed))) + "%"
-                if self.nic == "niantic":
-                    ret_data[header[4]] = str(self.expected_throughput_nnt[frame_size][descriptor]) + " Mpps"
-                elif self.nic == "fortville_25g":
-                    ret_data[header[4]] = str(self.expected_throughput_fvl25g[frame_size][descriptor]) + " Mpps"
-                elif self.nic == "fortville_spirit":
-                    ret_data[header[4]] = str(self.expected_throughput_fvl40g[frame_size][descriptor]) + " Mpps"
-                elif self.nic == "ConnectX5_MT4121":
-                    ret_data[header[4]] = str(self.expected_throughput_cx5[frame_size][descriptor]) + " Mpps"
-                elif self.nic == "ConnectX4_LX_MT4117":
-                    nic_speed = self.dut.ports_info[0]['port'].get_nic_speed()
-                    if nic_speed == "25000":
-                        ret_data[header[4]] = str(self.expected_throughput_cx4lx25g[frame_size][descriptor]) + " Mpps"
-                    else:
-                        ret_data[header[4]] = str(self.expected_throughput_cx4lx40g[frame_size][descriptor]) + " Mpps"
-                ret_datas[descriptor] = deepcopy(ret_data)
-                self.test_result[frame_size] = deepcopy(ret_datas)
-        
-        for frame_size in self.frame_sizes:
-            for descriptor in self.descriptors:
-                self.verify(self.test_result[frame_size][descriptor][header[2]] > 0, "No traffic detected")
-
-        # Print results
+                ret_data[header[1]] = nb_desc
+                ret_data[header[2]] = "{:.3f} Mpps".format(
+                    self.throughput[frame_size][nb_desc])
+                ret_data[header[3]] = "{:.3f}%".format(
+                    self.throughput[frame_size][nb_desc] * 100 / wirespeed)
+                ret_data[header[4]] = "{:.3f} Mpps".format(
+                    self.expected_throughput[frame_size][nb_desc])
+                ret_data[header[5]] = "{:.3f} Mpps".format(
+                    self.throughput[frame_size][nb_desc] -
+                        self.expected_throughput[frame_size][nb_desc])
+
+                ret_datas[nb_desc] = deepcopy(ret_data)
+            self.test_result[frame_size] = deepcopy(ret_datas)
+
+        # Create test results table
         self.result_table_create(header)
-        for frame_size in self.frame_sizes:
-            for descriptor in self.descriptors:
-                table_row = [self.test_result[frame_size][descriptor][header[0]]]
-                table_row.append(self.test_result[frame_size][descriptor][header[1]])
-                table_row.append(self.test_result[frame_size][descriptor][header[2]])
-                table_row.append(self.test_result[frame_size][descriptor][header[3]])
-                table_row.append(self.test_result[frame_size][descriptor][header[4]])
+        for frame_size in self.test_parameters.keys():
+            for nb_desc in self.test_parameters[frame_size]:
+                table_row = list()
+                for i in range(len(header)):
+                    table_row.append(
+                        self.test_result[frame_size][nb_desc][header[i]])
                 self.result_table_add(table_row)
-
+        # present test results to screen
         self.result_table_print()
 
         # save test results as a file
         if self.save_result_flag:
             self.save_result(self.test_result)
 
-        # check if the gap between expected throughput and actual throughput exceed accepted gap 
-        for frame_size in self.frame_sizes:
-            for descriptor in self.descriptors:
-                self.verify(float(self.test_result[frame_size][descriptor][header[4]].split()[0]) -
-                    float(self.test_result[frame_size][descriptor][header[2]].split()[0]) < self.gap, "Exceeded Gap")
-
-    def prepare_stream(self, port_num, options):
+    def prepare_stream(self, frame_size):
         '''
-        create streams for ports, one port one stream
+        create streams for the ports, two streams per port, and configure them.
         '''
-        # configure 2 streams for each tx port
-        if port_num == 2:
-            txport0 = self.tester.get_local_port(self.dut.get_ports()[0])
-            txport1 = self.tester.get_local_port(self.dut.get_ports()[1])
-            stream_id0 = self.tester.pktgen.add_stream(txport0, txport1, r'/root/test.pcap')
-            stream_id1 = self.tester.pktgen.add_stream(txport0, txport1, r'/root/test1.pcap')
-            stream_id2 = self.tester.pktgen.add_stream(txport1, txport0, r'/root/test.pcap')
-            stream_id3 = self.tester.pktgen.add_stream(txport1, txport0, r'/root/test1.pcap')
-            self.tester.pktgen.config_stream(stream_id0, options)
-            self.tester.pktgen.config_stream(stream_id1, options)
-            self.tester.pktgen.config_stream(stream_id2, options)
-            self.tester.pktgen.config_stream(stream_id3, options)
-            return [stream_id0, stream_id1, stream_id2, stream_id3]
-        # configure 1 stream for each tx port
-        elif port_num == 4:
-            txport0 = self.tester.get_local_port(self.dut.get_ports()[0])
-            txport1 = self.tester.get_local_port(self.dut.get_ports()[1])
-            txport2 = self.tester.get_local_port(self.dut.get_ports()[2])
-            txport3 = self.tester.get_local_port(self.dut.get_ports()[3])
-            stream_id0 = self.tester.pktgen.add_stream(txport0, txport1, r'/root/test.pcap')
-            stream_id1 = self.tester.pktgen.add_stream(txport1, txport0, r'/root/test.pcap')
-            stream_id2 = self.tester.pktgen.add_stream(txport2, txport3, r'/root/test.pcap')
-            stream_id3 = self.tester.pktgen.add_stream(txport3, txport2, r'/root/test.pcap')
-            self.tester.pktgen.config_stream(stream_id0, options)
-            self.tester.pktgen.config_stream(stream_id1, options)
-            self.tester.pktgen.config_stream(stream_id2, options)
-            self.tester.pktgen.config_stream(stream_id3, options)
-            return [stream_id0, stream_id1, stream_id2, stream_id3]
+        # traffic option
+        options = {
+            'rate': '100%',
+            # 'ip': {'action': 'inc', 'mask' : '255.255.255.0', 'step': '0.0.0.1'}
+        }
+
+        # create pcap file
+        payload_size = frame_size - self.headers_size
+        self.tester.scapy_append(
+            'wrpcap("/tmp/test0.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/("X"*%d)])' % payload_size)
+        self.tester.scapy_append(
+            'wrpcap("/tmp/test1.pcap", [Ether(src="52:00:00:00:00:00")/IP(src="2.2.3.4",dst="1.1.1.1")/("X"*%d)])' % payload_size)
+        self.tester.scapy_execute()
+
+        stream_ids = []
+        for i in range(self.nb_ports):
+            if i % 2 == 0:
+                txport = self.tester.get_local_port(self.dut.get_ports()[i])
+                rxport = self.tester.get_local_port(
+                    self.dut.get_ports()[i + 1])
+
+                # fortville requires 2 streams for 2 queues at least, and
+                # this is fine for other NICs too.
+                for k in range(2):
+                    # txport -> rxport
+                    stream_id = self.tester.pktgen.add_stream(
+                        txport, rxport, '/tmp/test{}.pcap'.format(k))
+                    self.tester.pktgen.config_stream(stream_id, options)
+                    stream_ids.append(stream_id)
+                    # rxport -> txport
+                    stream_id = self.tester.pktgen.add_stream(
+                        rxport, txport, '/tmp/test{}.pcap'.format(k))
+                    self.tester.pktgen.config_stream(stream_id, options)
+                    stream_ids.append(stream_id)
+
+        return stream_ids
 
     def save_result(self, data):
         '''
-        Saves the test results as a separated file named with self.nic+_single_core_perf.txt
-        in output folder if self.save_result_flag is True
+        Saves the test results as a separate file named with
+        self.nic+_single_core_perf.json in output folder
+        if self.save_result_flag is True
         '''
-        header = self.table_header
-        table = PrettyTable(header)
-        for frame_size in self.frame_sizes:
-            for descriptor in self.descriptors:
-                table_row = [self.test_result[frame_size][descriptor][header[0]]]
-                table_row.append(self.test_result[frame_size][descriptor][header[1]])
-                table_row.append(self.test_result[frame_size][descriptor][header[2]])
-                table_row.append(self.test_result[frame_size][descriptor][header[3]])
-                table_row.append(self.test_result[frame_size][descriptor][header[4]])
-                table.add_row(table_row)
-        file_to_save = open(os.path.join(
-            rst.path2Result, "%s_single_core_perf.txt" % self.nic), 'w')
-        file_to_save.write(str(table))
-        file_to_save.close()
-
         json_obj = dict()
         json_obj['nic_type'] = self.nic
         json_obj['results'] = list()
-        for frame_size in self.frame_sizes:
-            for descriptor in self.descriptors:
-                row_in = self.test_result[frame_size][descriptor]
+        for frame_size in self.test_parameters.keys():
+            for nb_desc in self.test_parameters[frame_size]:
+                row_in = self.test_result[frame_size][nb_desc]
                 row_dict = dict()
                 row_dict['parameters'] = dict()
                 row_dict['parameters']['frame_size'] = dict(
-                    value=row_in['Frame Size'], unit='bytes')
+                    value = row_in['Frame Size'], unit = 'bytes')
                 row_dict['parameters']['txd/rxd'] = dict(
-                    value=row_in['TXD/RXD'], unit='descriptors')
+                    value = row_in['TXD/RXD'], unit = 'descriptors')
                 delta = (float(row_in['Throughput'].split()[0]) -
                          float(row_in['Expected Throughput'].split()[0]))
                 row_dict['throughput'] = dict(
-                    delta=delta, unit=row_in['Throughput'].split()[1])
+                    delta = delta, unit = row_in['Throughput'].split()[1])
                 json_obj['results'].append(row_dict)
         with open(os.path.join(rst.path2Result,
                                '{0:s}_single_core_perf.json'.format(
@@ -316,11 +315,16 @@ class TestNicSingleCorePerf(TestCase):
         """
         Run after each test case.
         """
-        if self.nic in ["fortville_25g"]:
-            self.dut.send_expect("sed -i -e 's/CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=y/CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n/' ./config/common_base", "#", 20)    
+        pass
 
     def tear_down_all(self):
         """
         Run after each test suite.
         """
+        # resume setting
+        if self.nic in ["fortville_25g", "fortville_spirit"]:
+            self.dut.send_expect(
+                "sed -i -e 's/CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=y/CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n/' ./config/common_base", "#", 20)
+            # self.dut.build_install_dpdk(self.target)
+
         self.dut.kill_all()
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [dts] [next] [PATCH V2 3/3] conf/nic_single_core_perf: redefine elements
  2018-07-23 11:18 [dts] [next] [PATCH V2 0/3] support update-expected to meet Open Lab requirement Lijuan Tu
  2018-07-23 11:18 ` [dts] [next] [PATCH V2 1/3] framework: support argument: --update-expected Lijuan Tu
  2018-07-23 11:18 ` [dts] [next] [PATCH V2 2/3] tests/nic_single_core_perf: remove hardcode Lijuan Tu
@ 2018-07-23 11:18 ` Lijuan Tu
  2 siblings, 0 replies; 5+ messages in thread
From: Lijuan Tu @ 2018-07-23 11:18 UTC (permalink / raw)
  To: dts; +Cc: Lijuan Tu

The test case script changed; align the configuration file with these changes.

Define several attributes:
 * update_expected
 * test_duration
 * accepted_tolerance
 * expected_throughput

Signed-off-by: Lijuan Tu <lijuan.tu@intel.com>
---
 conf/nic_single_core_perf.cfg | 37 +++++++++++++++++++++++++++++++------
 1 file changed, 31 insertions(+), 6 deletions(-)

diff --git a/conf/nic_single_core_perf.cfg b/conf/nic_single_core_perf.cfg
index 444307e..da1a050 100644
--- a/conf/nic_single_core_perf.cfg
+++ b/conf/nic_single_core_perf.cfg
@@ -1,11 +1,36 @@
+# Throughput numbers vary in different environment.
+# Change this configuration on demand.
+#  - test_parameters defines the combination of frame size and descriptor
+# numbers, and the pattern is
+#    {'frame size': ['descriptor number #1', 'descriptor number #2']}
+#  - test_duration is how many seconds each combination performance will
+#  be recorded.
+#  - accepted_tolerance defines the accepted tolerance between test
+# results and expected numbers.
+#  - expected_throughput is a dictionary defining expected throughput
+# numbers based on NIC, and the pattern is
+# {'NIC': {'frame size': {'descriptor number': 'expected throughput'}}}
+# Default numbers of fortville_spirit and niantic are generated in this
+# environment:
+#     * Intel(R) Xeon(R) Platinum 8180 CPU @ 2.50GHz
+#     * Trex v3.23
+#  - if update_expected == True and the argument "--update-expected" is
+# added to the bash command, all objects in this file will be changed
+# after the run, for example: ./dts --update-expected
+
 [suite]
-# list expected throughput values for 64byte packet size and different
-# txd/rxd, these values may vary dut to different test enviroment,
-# please adjust accordingly, the below nnt and fvl25g expected
-# throughputs were tested under Purly,Ubuntu 16.04, IXIA
-# the formate for the expected throughput is a dictinary which means 
-#    {"packet size": {"descriptor ring size": "throughput"}}
+update_expected = True
+test_parameters = {64: [512, 2048]}
+test_duration = 60
+accepted_tolerance = 1
+expected_throughput = {'fortville_spirit': {64: {512: 62.35, 2048: 47.89}},
+    'niantic': {64: {128: 53.435, 512: 53.699, 2048: 42.798}},
+    'fortville_25g': {64: {512: 62.35, 2048: 47.651}},
+    'ConnectX4_LX_MT4117': {'40G': {64: {128: 31.635, 256: 32.473, 512: 30.72, 2048: 26.94}},
+                            '25G': {64: {128: 28.178, 256: 34.581, 512: 30.528, 2048: 26.004}}},
+    'ConnectX5_MT4121': {64: {128: 42.161, 256: 56.651, 512: 47.091, 2048: 40.104}}}
 
+# legacy format for reference
 throughput_nnt = {64: {128: 53.435, 512: 53.699, 2048: 42.798}}
 throughput_fvl25g = {64: {512: 43.777, 2048: 43.651}}
 throughput_fvl40g = {64: {512: 62.35, 2048: 47.89}}
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2018-07-23  5:22 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-07-23 11:18 [dts] [next] [PATCH V2 0/3] support update-expected to meet Open Lab requirement Lijuan Tu
2018-07-23 11:18 ` [dts] [next] [PATCH V2 1/3] framework: support argument: --update-expected Lijuan Tu
2018-07-23  5:22   ` Liu, Yong
2018-07-23 11:18 ` [dts] [next] [PATCH V2 2/3] tests/nic_single_core_perf: remove hardcode Lijuan Tu
2018-07-23 11:18 ` [dts] [next] [PATCH V2 3/3] conf/nic_single_core_perf: redefine elements Lijuan Tu

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).