test suite reviews and discussions
* [dts] [PATCH V1]tests/nic_single_core:Save standardized test results
@ 2020-02-21  7:40 hanyingya
  2020-02-21  7:57 ` Tu, Lijuan
  0 siblings, 1 reply; 2+ messages in thread
From: hanyingya @ 2020-02-21  7:40 UTC (permalink / raw)
  To: dts; +Cc: hanyingya

Enrich the results in JSON format and remove the redundant verification code.

Signed-off-by: hanyingya <yingyax.han@intel.com>
---
 tests/TestSuite_nic_single_core_perf.py | 54 +++++++++++--------------
 1 file changed, 23 insertions(+), 31 deletions(-)
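
For reference, a minimal sketch of the standardized layout the reworked handle_results() writes, assuming self.gap is the accepted throughput tolerance in Mpps. The field names ('status', 'performance', 'parameters', 'Throughput', 'Txd/Rxd', 'frame_size') follow the diff below; the case name, descriptor count, frame size and throughput figures are hypothetical.

import json

# Hypothetical inputs; in the suite these come from self.test_result and self.gap.
gap = 1.0                    # accepted tolerance, Mpps
result_throughput = 41.5     # measured throughput, Mpps
expected_throughput = 42.0   # expected throughput, Mpps
delta = result_throughput - expected_throughput

entry = {
    'status': 'PASS' if delta > -gap else 'FAIL',
    'performance': [
        {'name': 'Throughput', 'value': result_throughput, 'unit': 'Mpps', 'delta': delta},
    ],
    'parameters': [
        {'name': 'Txd/Rxd', 'value': 512, 'unit': 'descriptor'},
        {'name': 'frame_size', 'value': 64, 'unit': 'bytes'},
    ],
}
json_obj = {'nic_single_core_perf': [entry]}   # keyed by the running case name
print(json.dumps(json_obj, indent=4))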

diff --git a/tests/TestSuite_nic_single_core_perf.py b/tests/TestSuite_nic_single_core_perf.py
index 5781bc4..5fafe5f 100644
--- a/tests/TestSuite_nic_single_core_perf.py
+++ b/tests/TestSuite_nic_single_core_perf.py
@@ -197,20 +197,7 @@ class TestNicSingleCorePerf(TestCase):
         self.verify(self.nb_ports >= 1, "At least 1 port is required to test")
         self.perf_test(self.nb_ports)
         self.handle_results()
-
-        # check the gap between expected throughput and actual throughput
-        try:
-            for frame_size in list(self.test_parameters.keys()):
-                for nb_desc in self.test_parameters[frame_size]:
-                    cur_gap = (self.expected_throughput[frame_size][nb_desc] - self.throughput[frame_size][nb_desc])
-                    self.verify(cur_gap < self.gap, "Beyond Gap, Possible regression")
-        except Exception as e:
-            self.logger.error(e)
-            self.handle_expected()
-            raise VerifyFailure(
-                "Possible regression, Check your configuration please")
-        else:
-            self.handle_expected()
+        self.handle_expected()
 
     def handle_expected(self):
         """
@@ -336,27 +323,32 @@ class TestNicSingleCorePerf(TestCase):
         if self.save_result_flag is True
         '''
         json_obj = dict()
-        json_obj['nic_type'] = self.nic
-        json_obj['results'] = list()
+        case_name = self.running_case
+        json_obj[case_name] = list()
+        status_result = []
         for frame_size in list(self.test_parameters.keys()):
             for nb_desc in self.test_parameters[frame_size]:
                 row_in = self.test_result[frame_size][nb_desc]
-                row_dict = dict()
-                row_dict['parameters'] = dict()
-                row_dict['parameters']['frame_size'] = dict(
-                    value=row_in['Frame Size'], unit='bytes')
-                row_dict['parameters']['txd/rxd'] = dict(
-                    value=row_in['TXD/RXD'], unit='descriptors')
-                delta = (float(row_in['Throughput'].split()[0]) -
-                         float(row_in['Expected Throughput'].split()[0]))
-                if delta >= -self.gap:
-                    result = 'PASS'
+                row_dict0 = dict()
+                row_dict0['performance'] = list()
+                row_dict0['parameters'] = list()
+                result_throughput = float(row_in['Throughput'].split()[0])
+                expected_throughput = float(row_in['Expected Throughput'].split()[0])
+                # gap between measured and expected throughput, in Mpps
+                delta = result_throughput - expected_throughput
+                if delta > -self.gap:
+                    row_dict0['status'] = 'PASS'
                 else:
-                    result = 'FAIL'
-                row_dict['throughput'] = dict(
-                    delta=delta, unit=row_in['Throughput'].split()[1],
-                    result=result)
-                json_obj['results'].append(row_dict)
+                    row_dict0['status'] = 'FAIL'
+                row_dict1 = dict(name="Throughput", value=result_throughput, unit="Mpps", delta=delta)
+                row_dict2 = dict(name="Txd/Rxd", value=row_in["TXD/RXD"], unit="descriptor")
+                row_dict3 = dict(name="frame_size", value=row_in["Frame Size"], unit="bytes")
+                row_dict0['performance'].append(row_dict1)
+                row_dict0['parameters'].append(row_dict2)
+                row_dict0['parameters'].append(row_dict3)
+                json_obj[case_name].append(row_dict0)
+                status_result.append(row_dict0['status'])
+        self.verify("FAIL" not in status_result, "Excessive gap between test results and expectations")
         with open(os.path.join(rst.path2Result,
                                '{0:s}_single_core_perf.json'.format(
                                    self.nic)), 'w') as fp:
-- 
2.17.2
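
A hedged usage sketch of consuming the file the suite writes: the '{nic}_single_core_perf.json' name pattern and the per-entry 'status' field come from the patch above, while the output directory and NIC name below are placeholders. It mirrors the single self.verify() check that replaces the old per-frame-size gap loop.

import json
import os

result_dir = '/tmp/dts/output'   # placeholder for rst.path2Result
nic = 'fortville_spirit'         # placeholder NIC name
path = os.path.join(result_dir, '{0:s}_single_core_perf.json'.format(nic))

with open(path) as fp:
    json_obj = json.load(fp)

# Same pass criterion as the suite's final check: no entry may report 'FAIL'.
statuses = [entry['status'] for case in json_obj.values() for entry in case]
assert 'FAIL' not in statuses, "Excessive gap between test results and expectations"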



* Re: [dts] [PATCH V1]tests/nic_single_core:Save standardized test results
  2020-02-21  7:40 [dts] [PATCH V1]tests/nic_single_core:Save standardized test results hanyingya
@ 2020-02-21  7:57 ` Tu, Lijuan
  0 siblings, 0 replies; 2+ messages in thread
From: Tu, Lijuan @ 2020-02-21  7:57 UTC (permalink / raw)
  To: Han, YingyaX, dts; +Cc: Han, YingyaX

Applied, thanks

> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of hanyingya
> Sent: Friday, February 21, 2020 3:41 PM
> To: dts@dpdk.org
> Cc: Han, YingyaX <yingyax.han@intel.com>
> Subject: [dts] [PATCH V1]tests/nic_single_core:Save standardized test results
> 
> Enrich the results in JSON format and remove the redundant verification code.
> 
> Signed-off-by: hanyingya <yingyax.han@intel.com>


