* [dts] [PATCH V1 1/3] tests/*: changed eal -w parameter to -a
@ 2021-10-13 9:24 Jun Dong
2021-10-13 9:24 ` [dts] [PATCH V1 2/3] test_plans/*: " Jun Dong
2021-10-13 9:24 ` [dts] [PATCH V1 3/3] conf/*: " Jun Dong
0 siblings, 2 replies; 4+ messages in thread
From: Jun Dong @ 2021-10-13 9:24 UTC (permalink / raw)
To: dts; +Cc: PingX.Yu, weix.ling, junx.dong
- changed eal parameter -w to -a for all test suites
Signed-off-by: Jun Dong <junx.dong@intel.com>
---
tests/TestSuite_cloud_filter_with_l4_port.py | 2 +-
tests/TestSuite_cvl_advanced_iavf_rss_gtpu.py | 2 +-
tests/TestSuite_cvl_advanced_rss.py | 2 +-
tests/TestSuite_cvl_advanced_rss_gtpogre.py | 2 +-
tests/TestSuite_cvl_advanced_rss_gtpu.py | 2 +-
tests/TestSuite_cvl_advanced_rss_pppoe.py | 2 +-
tests/TestSuite_cvl_advanced_rss_vlan_esp_ah_l2tp_pfcp.py | 2 +-
tests/TestSuite_cvl_fdir.py | 4 +-
tests/TestSuite_cvl_iavf_rss_configure.py | 2 +-
tests/TestSuite_dpdk_hugetlbfs_mount_size.py | 18 ++++----
tests/TestSuite_dynamic_queue.py | 2 +-
...tSuite_enable_package_download_in_ice_driver.py | 4 +-
tests/TestSuite_eventdev_perf.py | 4 +-
tests/TestSuite_eventdev_pipeline.py | 2 +-
tests/TestSuite_eventdev_pipeline_perf.py | 6 +--
tests/TestSuite_floating_veb.py | 4 +-
tests/TestSuite_generic_flow_api.py | 54 +++++++++++-----------
tests/TestSuite_iavf.py | 2 +-
.../TestSuite_iavf_package_driver_error_handle.py | 2 +-
| 4 +-
tests/TestSuite_l2fwd.py | 2 +-
tests/TestSuite_l2tp_esp_coverage.py | 2 +-
.../TestSuite_malicious_driver_event_indication.py | 4 +-
tests/TestSuite_multiprocess.py | 6 +--
tests/TestSuite_nic_single_core_perf.py | 2 +-
tests/TestSuite_performance_thread.py | 2 +-
tests/TestSuite_pmd.py | 2 +-
tests/TestSuite_port_representor.py | 6 +--
...te_pvp_virtio_user_multi_queues_port_restart.py | 2 +-
tests/TestSuite_qinq_filter.py | 8 ++--
tests/TestSuite_qos_api.py | 2 +-
tests/TestSuite_runtime_vf_queue_number.py | 24 +++++-----
tests/TestSuite_runtime_vf_queue_number_maxinum.py | 10 ++--
tests/TestSuite_softnic.py | 2 +-
tests/TestSuite_sriov_kvm.py | 2 +-
tests/TestSuite_stats_checks.py | 2 +-
tests/TestSuite_telemetry.py | 2 +-
tests/TestSuite_unit_tests_event_timer.py | 4 +-
tests/TestSuite_vf_l3fwd.py | 4 +-
tests/TestSuite_vf_single_core_perf.py | 2 +-
tests/TestSuite_vm2vm_virtio_pmd.py | 2 +-
tests/TestSuite_vmdq.py | 2 +-
tests/TestSuite_vmdq_dcb.py | 2 +-
tests/flexible_common.py | 6 +--
tests/perf_test_base.py | 8 ++--
45 files changed, 116 insertions(+), 116 deletions(-)
diff --git a/tests/TestSuite_cloud_filter_with_l4_port.py b/tests/TestSuite_cloud_filter_with_l4_port.py
index 272b86c..0e2e548 100644
--- a/tests/TestSuite_cloud_filter_with_l4_port.py
+++ b/tests/TestSuite_cloud_filter_with_l4_port.py
@@ -84,7 +84,7 @@ class TestCloudFilterWithL4Port(TestCase):
"""
self.dut.kill_all()
- self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d --disable-rss" % (MAX_QUEUE, MAX_QUEUE), "-w %s --file-prefix=test1" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d --disable-rss" % (MAX_QUEUE, MAX_QUEUE), "-a %s --file-prefix=test1" % self.pf_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set promisc all off", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
diff --git a/tests/TestSuite_cvl_advanced_iavf_rss_gtpu.py b/tests/TestSuite_cvl_advanced_iavf_rss_gtpu.py
index d19ddbf..0ce0e7e 100644
--- a/tests/TestSuite_cvl_advanced_iavf_rss_gtpu.py
+++ b/tests/TestSuite_cvl_advanced_iavf_rss_gtpu.py
@@ -6971,7 +6971,7 @@ class TestCVLAdvancedIAVFRSSGTPU(TestCase):
# if support add --disable-rss
param = "--rxq=16 --txq=16"
self.pmd_output.start_testpmd(cores="1S/4C/1T", param=param,
- eal_param=f"-w {self.vf0_pci}", socket=self.ports_socket)
+ eal_param=f"-a {self.vf0_pci}", socket=self.ports_socket)
'''
self.symmetric = symmetric
if symmetric:
diff --git a/tests/TestSuite_cvl_advanced_rss.py b/tests/TestSuite_cvl_advanced_rss.py
index 8ed6d1a..7c8a317 100644
--- a/tests/TestSuite_cvl_advanced_rss.py
+++ b/tests/TestSuite_cvl_advanced_rss.py
@@ -4920,7 +4920,7 @@ class AdvancedRSSTest(TestCase):
else:
param = "--rxq=64 --txq=64 --disable-rss --rxd=384 --txd=384"
out = self.pmd_output.start_testpmd(cores="1S/4C/1T", param=param,
- eal_param=f"-w {self.pci0}", socket=self.ports_socket)
+ eal_param=f"-a {self.pci0}", socket=self.ports_socket)
self.symmetric = symmetric
if symmetric is True:
'''
diff --git a/tests/TestSuite_cvl_advanced_rss_gtpogre.py b/tests/TestSuite_cvl_advanced_rss_gtpogre.py
index 4a889ea..3033561 100755
--- a/tests/TestSuite_cvl_advanced_rss_gtpogre.py
+++ b/tests/TestSuite_cvl_advanced_rss_gtpogre.py
@@ -3158,7 +3158,7 @@ class TestCVLAdvancedRSSGTPoGRE(TestCase):
else:
param = "--rxq=64 --txq=64 --disable-rss --rxd=384 --txd=384"
self.pmd_output.start_testpmd(cores="1S/4C/1T", param=param,
- eal_param=f"-w {self.pci0}", socket=self.ports_socket)
+ eal_param=f"-a {self.pci0}", socket=self.ports_socket)
self.enable_rss = enable_rss
if set_rss:
self.pmd_output.execute_cmd('port config all rss all')
diff --git a/tests/TestSuite_cvl_advanced_rss_gtpu.py b/tests/TestSuite_cvl_advanced_rss_gtpu.py
index 37be57f..e09b787 100755
--- a/tests/TestSuite_cvl_advanced_rss_gtpu.py
+++ b/tests/TestSuite_cvl_advanced_rss_gtpu.py
@@ -4256,7 +4256,7 @@ class TestCVLAdvancedRSSGTPU(TestCase):
else:
param = "--rxq=64 --txq=64 --disable-rss --rxd=384 --txd=384"
self.pmd_output.start_testpmd(cores="1S/4C/1T", param=param,
- eal_param=f"-w {self.pci0}", socket=self.ports_socket)
+ eal_param=f"-a {self.pci0}", socket=self.ports_socket)
self.enable_rss = enable_rss
if set_rss:
self.pmd_output.execute_cmd('port config all rss all')
diff --git a/tests/TestSuite_cvl_advanced_rss_pppoe.py b/tests/TestSuite_cvl_advanced_rss_pppoe.py
index 3952bbf..6bd21af 100644
--- a/tests/TestSuite_cvl_advanced_rss_pppoe.py
+++ b/tests/TestSuite_cvl_advanced_rss_pppoe.py
@@ -4526,7 +4526,7 @@ class Advanced_rss_pppoe(TestCase):
else:
param = "--rxq=64 --txq=64 --disable-rss --rxd=384 --txd=384"
out = self.pmd_output.start_testpmd(cores="1S/4C/1T", param=param,
- eal_param=f"-w {self.pci_list[0]}", socket=self.ports_socket)
+ eal_param=f"-a {self.pci_list[0]}", socket=self.ports_socket)
self.symmetric = symmetric
if symmetric:
# Need config rss in setup
diff --git a/tests/TestSuite_cvl_advanced_rss_vlan_esp_ah_l2tp_pfcp.py b/tests/TestSuite_cvl_advanced_rss_vlan_esp_ah_l2tp_pfcp.py
index df071e9..e3f0808 100644
--- a/tests/TestSuite_cvl_advanced_rss_vlan_esp_ah_l2tp_pfcp.py
+++ b/tests/TestSuite_cvl_advanced_rss_vlan_esp_ah_l2tp_pfcp.py
@@ -857,7 +857,7 @@ class Advanced_rss_vlan_ah_l2tp_pfcp(TestCase):
else:
param = "--rxq=64 --txq=64 --disable-rss --rxd=384 --txd=384"
out = self.pmd_output.start_testpmd(cores="1S/4C/1T", param=param,
- eal_param=f"-w {self.pci_list[0]}", socket=self.ports_socket)
+ eal_param=f"-a {self.pci_list[0]}", socket=self.ports_socket)
self.symmetric = symmetric
if symmetric:
# Need config rss in setup
diff --git a/tests/TestSuite_cvl_fdir.py b/tests/TestSuite_cvl_fdir.py
index 1883675..541599f 100644
--- a/tests/TestSuite_cvl_fdir.py
+++ b/tests/TestSuite_cvl_fdir.py
@@ -2585,7 +2585,7 @@ class TestCVLFdir(TestCase):
self.pmd_output.start_testpmd(cores="1S/4C/1T",
param="--portmask=%s --rxq=%d --txq=%d --port-topology=loop" % (
self.portMask, rxq, txq),
- eal_param="-w %s -w %s --log-level=ice,7" % (
+ eal_param="-a %s -a %s --log-level=ice,7" % (
self.pci0, self.pci1), socket=self.ports_socket)
self.config_testpmd()
@@ -3387,7 +3387,7 @@ class TestCVLFdir(TestCase):
out = self.pmd_output.start_testpmd(cores="1S/4C/1T",
param="--portmask=%s --rxq=%d --txq=%d --port-topology=loop --cmdline-file=%s" % (
self.portMask, 64, 64, cmd_path),
- eal_param="-w %s -w %s --log-level='ice,7'" % (
+ eal_param="-a %s -a %s --log-level='ice,7'" % (
self.pci0, self.pci1), socket=self.ports_socket)
self.verify('Failed to create flow' not in out, "create some rule failed")
self.config_testpmd()
diff --git a/tests/TestSuite_cvl_iavf_rss_configure.py b/tests/TestSuite_cvl_iavf_rss_configure.py
index 86b2819..baf33c5 100755
--- a/tests/TestSuite_cvl_iavf_rss_configure.py
+++ b/tests/TestSuite_cvl_iavf_rss_configure.py
@@ -311,7 +311,7 @@ class IAVFRSSConfigureTest(TestCase):
"""
#Prepare testpmd EAL and parameters
self.pmdout.start_testpmd(cores=self.cores, param=param,
- eal_param=f"-w {self.vf0_pci}", socket=self.ports_socket)
+ eal_param=f"-a {self.vf0_pci}", socket=self.ports_socket)
# test link status
res = self.pmdout.wait_link_status_up('all', timeout=15)
self.verify(res is True, 'there have port link is down')
diff --git a/tests/TestSuite_dpdk_hugetlbfs_mount_size.py b/tests/TestSuite_dpdk_hugetlbfs_mount_size.py
index 48fdc8e..c170de8 100644
--- a/tests/TestSuite_dpdk_hugetlbfs_mount_size.py
+++ b/tests/TestSuite_dpdk_hugetlbfs_mount_size.py
@@ -116,7 +116,7 @@ class DpdkHugetlbfsMountSize(TestCase):
# Bind one nic port to igb_uio driver, launch testpmd
self.dut.send_expect("mount -t hugetlbfs hugetlbfs %s" % MNT_PATH[0], "#", 15)
self.logger.info("test default hugepage size start testpmd without numa")
- ttd = '%s -l %s -n %d --huge-dir %s --file-prefix=%s -w %s -- -i'
+ ttd = '%s -l %s -n %d --huge-dir %s --file-prefix=%s -a %s -- -i'
launch_ttd = ttd % (self.app_path, self.core_list1, self.mem_channels, MNT_PATH[0], vhost_name[0], self.pci_info_0)
self.dut.send_expect(launch_ttd, "testpmd> ", 120)
self.dut.send_expect("set promisc all off", "testpmd> ", 120)
@@ -128,7 +128,7 @@ class DpdkHugetlbfsMountSize(TestCase):
# resart testpmd with numa support
self.logger.info("test default hugepage size start testpmd with numa")
- ttd_secondary = '%s -l %s -n %d --huge-dir %s --file-prefix=%s -w %s -- -i --numa'
+ ttd_secondary = '%s -l %s -n %d --huge-dir %s --file-prefix=%s -a %s -- -i --numa'
launch_ttd_secondary = ttd_secondary % (self.app_path, self.core_list1, self.mem_channels, MNT_PATH[0], vhost_name[0], self.pci_info_0)
self.dut.send_expect(launch_ttd_secondary, "testpmd> ", 120)
self.dut.send_expect("set promisc all off", "testpmd> ", 120)
@@ -148,7 +148,7 @@ class DpdkHugetlbfsMountSize(TestCase):
self.logger.info("start first testpmd")
ttd = 'numactl --membind=%d %s -l %s -n %d --legacy-mem --socket-mem %s' \
- ' --huge-dir %s --file-prefix=%s -w %s -- -i --socket-num=%d --no-numa'
+ ' --huge-dir %s --file-prefix=%s -a %s -- -i --socket-num=%d --no-numa'
launch_ttd = ttd % (self.numa_id, self.app_path, self.core_list1, self.mem_channels, self.socket_mem2, MNT_PATH[0], vhost_name[0], self.pci_info_0, self.numa_id)
self.session_first.send_expect(launch_ttd, "testpmd> ", 120)
self.session_first.send_expect("set promisc all off", "testpmd> ", 120)
@@ -157,7 +157,7 @@ class DpdkHugetlbfsMountSize(TestCase):
self.logger.info("start secondary testpmd")
ttd_secondary = 'numactl --membind=%d %s -l %s -n %d --legacy-mem --socket-mem %s' \
- ' --huge-dir %s --file-prefix=%s -w %s -- -i --socket-num=%d --no-numa'
+ ' --huge-dir %s --file-prefix=%s -a %s -- -i --socket-num=%d --no-numa'
launch_ttd_secondary = ttd_secondary % (self.numa_id, self.app_path, self.core_list2, self.mem_channels, self.socket_mem2, MNT_PATH[1], vhost_name[1], self.pci_info_1, self.numa_id)
self.session_secondary.send_expect(launch_ttd_secondary, "testpmd> ", 120)
self.session_secondary.send_expect("set promisc all off", "testpmd> ", 120)
@@ -175,7 +175,7 @@ class DpdkHugetlbfsMountSize(TestCase):
def test_mount_size_greater_than_hugepage_size_single_mount_point(self):
# Bind one nic port to igb_uio driver
self.dut.send_expect("mount -t hugetlbfs -o size=9G hugetlbfs %s" % MNT_PATH[0], "#", 15)
- ttd = '%s -l %s -n %d --legacy-mem --huge-dir %s --file-prefix=%s -w %s -- -i'
+ ttd = '%s -l %s -n %d --legacy-mem --huge-dir %s --file-prefix=%s -a %s -- -i'
launch_ttd = ttd % (self.app_path, self.core_list1, self.mem_channels, MNT_PATH[0], vhost_name[0], self.pci_info_0)
self.dut.send_expect(launch_ttd, "testpmd> ", 120)
self.dut.send_expect("set promisc all off", "testpmd> ", 120)
@@ -196,7 +196,7 @@ class DpdkHugetlbfsMountSize(TestCase):
# launch first testpmd
self.logger.info("launch first testpmd")
ttd = 'numactl --membind=%d %s -l %s -n %d --legacy-mem --socket-mem %s --huge-dir %s' \
- ' --file-prefix=%s -w %s -- -i --socket-num=%d --no-numa'
+ ' --file-prefix=%s -a %s -- -i --socket-num=%d --no-numa'
launch_ttd = ttd % (self.numa_id, self.app_path, self.core_list1, self.mem_channels, self.socket_mem2, MNT_PATH[0], vhost_name[0], self.pci_info_0, self.numa_id)
self.session_first.send_expect(launch_ttd, "testpmd> ", 120)
self.session_first.send_expect("set promisc all off", "testpmd> ", 120)
@@ -206,7 +206,7 @@ class DpdkHugetlbfsMountSize(TestCase):
# launch secondary testpmd
self.logger.info("launch secondary testpmd")
ttd_secondary = 'numactl --membind=%d %s -l %s -n %d --legacy-mem --socket-mem %s --huge-dir' \
- ' %s --file-prefix=%s -w %s -- -i --socket-num=%d --no-numa'
+ ' %s --file-prefix=%s -a %s -- -i --socket-num=%d --no-numa'
launch_ttd_secondary = ttd_secondary % (self.numa_id, self.app_path, self.core_list2, self.mem_channels, self.socket_mem2, MNT_PATH[1], vhost_name[1], self.pci_info_1, self.numa_id)
self.session_secondary.send_expect(launch_ttd_secondary, "testpmd> ", 120)
self.session_secondary.send_expect("set promisc all off", "testpmd> ", 120)
@@ -216,7 +216,7 @@ class DpdkHugetlbfsMountSize(TestCase):
# launch third testpmd
self.logger.info("launch third testpmd")
ttd_third = 'numactl --membind=%d %s -l %s -n %d --legacy-mem --socket-mem %s --huge-dir' \
- ' %s --file-prefix=%s -w %s -- -i --socket-num=%d --no-numa'
+ ' %s --file-prefix=%s -a %s -- -i --socket-num=%d --no-numa'
launch_ttd_third = ttd_third % (self.numa_id, self.app_path, self.core_list3, self.mem_channels, self.socket_mem, MNT_PATH[2], vhost_name[2], self.pci_info_0, self.numa_id)
expect_str = 'Not enough memory available on socket'
self.dut.get_session_output(timeout=2)
@@ -248,7 +248,7 @@ class DpdkHugetlbfsMountSize(TestCase):
self.dut.send_expect("mount -t hugetlbfs nodev %s" % MNT_PATH[0], "#", 15)
self.dut.send_expect("cgcreate -g hugetlb:/test-subgroup", "# ", 15)
self.dut.send_expect("cgset -r hugetlb.1GB.limit_in_bytes=2147483648 test-subgroup", "#", 15)
- ttd = 'cgexec -g hugetlb:test-subgroup numactl -m %d %s -l %s -n %d -w %s -- -i --socket-num=%d --no-numa'
+ ttd = 'cgexec -g hugetlb:test-subgroup numactl -m %d %s -l %s -n %d -a %s -- -i --socket-num=%d --no-numa'
launch_ttd = ttd % (self.numa_id, self.app_path, self.core_list1, self.mem_channels, self.pci_info_0, self.numa_id)
self.dut.send_expect(launch_ttd, "testpmd> ", 120)
self.dut.send_expect("set promisc all off", "testpmd> ", 120)
diff --git a/tests/TestSuite_dynamic_queue.py b/tests/TestSuite_dynamic_queue.py
index 9314ff7..be8a45b 100644
--- a/tests/TestSuite_dynamic_queue.py
+++ b/tests/TestSuite_dynamic_queue.py
@@ -65,7 +65,7 @@ class TestDynamicQueue(TestCase):
elif (self.nic in ["cavium_a063", "cavium_a064"]):
eal_opts = ""
for port in self.dut_ports:
- eal_opts += "-w %s,max_pools=256 "%(self.dut.get_port_pci(self.dut_ports[port]))
+ eal_opts += "-a %s,max_pools=256 "%(self.dut.get_port_pci(self.dut_ports[port]))
self.dut_testpmd.start_testpmd(
"Default", "--port-topology=chained --txq=%s --rxq=%s"
% (self.PF_QUEUE, self.PF_QUEUE), eal_param = eal_opts)
diff --git a/tests/TestSuite_enable_package_download_in_ice_driver.py b/tests/TestSuite_enable_package_download_in_ice_driver.py
index 8bf21d1..0c85f31 100644
--- a/tests/TestSuite_enable_package_download_in_ice_driver.py
+++ b/tests/TestSuite_enable_package_download_in_ice_driver.py
@@ -95,7 +95,7 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase):
self.eal_param = ""
if safe_mode_support == "true":
for i in range(len(self.dut_ports)):
- self.eal_param = self.eal_param + "-w %s,safe-mode-support=1 " % self.dut.ports_info[i]['pci']
+ self.eal_param = self.eal_param + "-a %s,safe-mode-support=1 " % self.dut.ports_info[i]['pci']
out = self.dut_testpmd.start_testpmd("all", "--nb-cores=8 --rxq=%s --txq=%s --port-topology=chained" % (self.PF_QUEUE, self.PF_QUEUE), eal_param=self.eal_param)
if ice_pkg == "false":
if safe_mode_support == "true":
@@ -340,7 +340,7 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase):
self.copy_specify_ice_pkg(self.new_pkgs[i])
self.generate_delete_specify_pkg(pkg_ver=self.new_pkgs[i], sn=self.nic_sn[i], key="true")
- eal_param = "-w %s " % self.nic_pci[0] + "-w %s " % self.nic_pci[1] + "--log-level=8"
+ eal_param = "-a %s " % self.nic_pci[0] + "-a %s " % self.nic_pci[1] + "--log-level=8"
out = self.dut_testpmd.execute_cmd(self.path + eal_param + " -- -i ")
self.dut_testpmd.quit()
diff --git a/tests/TestSuite_eventdev_perf.py b/tests/TestSuite_eventdev_perf.py
index 7730cd1..e20c988 100644
--- a/tests/TestSuite_eventdev_perf.py
+++ b/tests/TestSuite_eventdev_perf.py
@@ -143,11 +143,11 @@ class TestEventdevPerf(TestCase):
def eventdev_cmd(self, test_type, stlist, nports, worker_cores):
self.Port_pci_ids = []
- command_line1 = self.app_command + " -l %s -w %s"
+ command_line1 = self.app_command + " -l %s -a %s"
for i in range(0, nports):
self.Port_pci_ids.append(self.dut.ports_info[i]['pci'])
## Adding core-list and pci-ids
- command_line1 = command_line1 + " -w %s "
+ command_line1 = command_line1 + " -a %s "
## Adding test and stage types
command_line2 = "-- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=%s --stlist=%s --wlcores=%s" %(test_type, stlist, worker_cores)
return command_line1 + command_line2
diff --git a/tests/TestSuite_eventdev_pipeline.py b/tests/TestSuite_eventdev_pipeline.py
index 33091b6..3295950 100644
--- a/tests/TestSuite_eventdev_pipeline.py
+++ b/tests/TestSuite_eventdev_pipeline.py
@@ -99,7 +99,7 @@ class TestEventdevPipeline(TestCase):
ports=[self.dut.ports_info[0]['pci']])
command_line = "taskset -c %s " + self.app_command + \
"/build/eventdev_pipeline %s " + \
- "--vdev event_sw0 -- -r%s -t%s -e%s -w %s -s1 -n0 -c32 -W1000 %s -D"
+ "--vdev event_sw0 -- -r%s -t%s -e%s -a %s -s1 -n0 -c32 -W1000 %s -D"
command_line = command_line % (
self.taskset_core_list, eal_params, self.core_mask_rx,
self.core_mask_tx, self.core_mask_sd, self.core_mask_wk, cmd_type)
diff --git a/tests/TestSuite_eventdev_pipeline_perf.py b/tests/TestSuite_eventdev_pipeline_perf.py
index 69cb4aa..830c91a 100644
--- a/tests/TestSuite_eventdev_pipeline_perf.py
+++ b/tests/TestSuite_eventdev_pipeline_perf.py
@@ -143,13 +143,13 @@ class TestEventdevPipelinePerf(TestCase):
def eventdev_cmd(self, stlist, nports, wmask):
self.Port_pci_ids = []
- command_line1 = self.app_command + " -c %s -w %s"
+ command_line1 = self.app_command + " -c %s -a %s"
for i in range(0, nports):
self.Port_pci_ids.append(self.dut.ports_info[i]['pci'])
## Adding core-list and pci-ids
- command_line1 = command_line1 + " -w %s "
+ command_line1 = command_line1 + " -a %s "
## Adding test and stage types
- command_line2 = "-- -w %s -n=0 --dump %s -m 16384" % (wmask , stlist )
+ command_line2 = "-- -a %s -n=0 --dump %s -m 16384" % (wmask , stlist )
return command_line1 + command_line2
def test_perf_eventdev_pipeline_1ports_atomic_performance(self):
diff --git a/tests/TestSuite_floating_veb.py b/tests/TestSuite_floating_veb.py
index 8bfc4c4..c012abd 100644
--- a/tests/TestSuite_floating_veb.py
+++ b/tests/TestSuite_floating_veb.py
@@ -367,7 +367,7 @@ class TestFloatingVEBSwitching(TestCase):
"""
self.setup_env(driver=self.drivername, vf_num=4)
# start PF
- cmd = self.path + "-c 0xf -n 4 --socket-mem 1024,1024 -w \"%s,enable_floating_veb=1,floating_veb_list=0;2-3\" --file-prefix=test1 -- -i" % self.pf_pci
+ cmd = self.path + "-c 0xf -n 4 --socket-mem 1024,1024 -a \"%s,enable_floating_veb=1,floating_veb_list=0;2-3\" --file-prefix=test1 -- -i" % self.pf_pci
self.dut.send_expect(cmd, "testpmd> ", 120)
self.dut.send_expect("port start all", "testpmd>")
time.sleep(2)
@@ -497,7 +497,7 @@ class TestFloatingVEBSwitching(TestCase):
"""
self.setup_env(driver=self.drivername, vf_num=4)
# VF0->PF
- cmd = self.path + "-c 0xf -n 4 --socket-mem 1024,1024 -w \"%s,enable_floating_veb=1,floating_veb_list=0;3\" --file-prefix=test1 -- -i" % self.pf_pci
+ cmd = self.path + "-c 0xf -n 4 --socket-mem 1024,1024 -a \"%s,enable_floating_veb=1,floating_veb_list=0;3\" --file-prefix=test1 -- -i" % self.pf_pci
self.dut.send_expect(cmd, "testpmd> ", 120)
self.dut.send_expect("set fwd rxonly", "testpmd>")
self.dut.send_expect("set promisc all off", "testpmd>")
diff --git a/tests/TestSuite_generic_flow_api.py b/tests/TestSuite_generic_flow_api.py
index 32805e4..2c17e4f 100644
--- a/tests/TestSuite_generic_flow_api.py
+++ b/tests/TestSuite_generic_flow_api.py
@@ -863,7 +863,7 @@ class TestGeneric_flow_api(TestCase):
"powerville", "fortville_eagle", "fortville_25g", "fortville_spirit", "carlsville",
"fortville_spirit_single", "fortpark_TLV","fortpark_BASE-T", "foxville","columbiaville_25g","columbiaville_100g"], "%s nic not support ethertype filter" % self.nic)
- self.pmdout.start_testpmd("%s" % self.cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=test1" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=test1" % self.pf_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
self.dut.send_expect("start", "testpmd> ", 120)
@@ -916,19 +916,19 @@ class TestGeneric_flow_api(TestCase):
self.setup_env()
# start testpmd on pf
- self.pmdout.start_testpmd("1S/4C/1T", "--rxq=4 --txq=4", "-w %s --file-prefix=pf -m 1024" % self.pf_pci)
+ self.pmdout.start_testpmd("1S/4C/1T", "--rxq=4 --txq=4", "-a %s --file-prefix=pf -m 1024" % self.pf_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
self.dut.send_expect("start", "testpmd> ", 120)
time.sleep(2)
# start testpmd on vf0
- self.session_secondary.send_expect("%s -c 0x1e0 -n 4 -m 1024 -w %s --file-prefix=vf1 -- -i --rxq=4 --txq=4" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
+ self.session_secondary.send_expect("%s -c 0x1e0 -n 4 -m 1024 -a %s --file-prefix=vf1 -- -i --rxq=4 --txq=4" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
self.session_secondary.send_expect("set fwd rxonly", "testpmd> ")
self.session_secondary.send_expect("set verbose 1", "testpmd> ")
self.session_secondary.send_expect("start", "testpmd> ")
time.sleep(2)
# start testpmd on vf1
- self.session_third.send_expect("%s -c 0x1e00 -n 4 -m 1024 -w %s --file-prefix=vf2 -- -i --rxq=4 --txq=4" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
+ self.session_third.send_expect("%s -c 0x1e00 -n 4 -m 1024 -a %s --file-prefix=vf2 -- -i --rxq=4 --txq=4" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
self.session_third.send_expect("set fwd rxonly", "testpmd> ")
self.session_third.send_expect("set verbose 1", "testpmd> ")
self.session_third.send_expect("start", "testpmd> ")
@@ -962,7 +962,7 @@ class TestGeneric_flow_api(TestCase):
self.verify(self.nic in ["fortville_eagle", "fortville_25g", "fortville_spirit", "carlsville",
"fortville_spirit_single", "fortpark_TLV","fortpark_BASE-T", "foxville","columbiaville_25g","columbiaville_100g"], "%s nic not support fdir L2 payload filter" % self.nic)
- self.pmdout.start_testpmd("%s" % self.pf_cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=test1" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.pf_cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=test1" % self.pf_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
self.dut.send_expect("start", "testpmd> ", 120)
@@ -1009,7 +1009,7 @@ class TestGeneric_flow_api(TestCase):
"fortville_spirit_single", "fortpark_TLV",
"fortpark_BASE-T","fortville_25g","carlsville","columbiaville_25g","columbiaville_100g"], "%s nic not support fdir vlan filter" % self.nic)
- self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=test1" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=test1" % self.pf_pci)
self.dut.send_expect("port config all rss all", "testpmd> ", 120)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
@@ -1098,7 +1098,7 @@ class TestGeneric_flow_api(TestCase):
"fortville_spirit_single", "fortpark_TLV",
"fortpark_BASE-T","fortville_25g","carlsville","columbiaville_25g","columbiaville_100g"], "%s nic not support fdir vlan filter" % self.nic)
- self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=test1" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=test1" % self.pf_pci)
self.dut.send_expect("port config all rss all", "testpmd> ", 120)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
@@ -1187,7 +1187,7 @@ class TestGeneric_flow_api(TestCase):
"fortville_spirit_single", "fortpark_TLV",
"fortpark_BASE-T","fortville_25g","carlsville","columbiaville_25g","columbiaville_100g"], "%s nic not support fdir vlan filter" % self.nic)
- self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=test1" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=test1" % self.pf_pci)
self.dut.send_expect("port config all rss all", "testpmd> ", 120)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
@@ -1276,7 +1276,7 @@ class TestGeneric_flow_api(TestCase):
"fortville_spirit_single", "fortpark_TLV",
"fortpark_BASE-T","fortville_25g","carlsville","columbiaville_25g","columbiaville_100g"], "%s nic not support fdir vlan filter" % self.nic)
- self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=test1" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=test1" % self.pf_pci)
self.dut.send_expect("port config all rss all", "testpmd> ", 120)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
@@ -1364,7 +1364,7 @@ class TestGeneric_flow_api(TestCase):
"fortville_spirit_single", "fortpark_TLV",
"fortpark_BASE-T","fortville_25g","carlsville","columbiaville_25g","columbiaville_100g"], "%s nic not support fdir vlan filter" % self.nic)
- self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=test1" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.cores, "--rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=test1" % self.pf_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
self.dut.send_expect("start", "testpmd> ", 120)
@@ -1427,19 +1427,19 @@ class TestGeneric_flow_api(TestCase):
"fortville_spirit_single", "fortpark_TLV","fortpark_BASE-T", "foxville","columbiaville_25g","columbiaville_100g"], "%s nic not support fdir vlan filter" % self.nic)
self.setup_env()
# start testpmd on pf
- self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
self.dut.send_expect("start", "testpmd> ", 120)
time.sleep(2)
# start testpmd on vf0
- self.session_secondary.send_expect("%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -w %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
+ self.session_secondary.send_expect("%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
self.session_secondary.send_expect("set fwd rxonly", "testpmd>")
self.session_secondary.send_expect("set verbose 1", "testpmd>")
self.session_secondary.send_expect("start", "testpmd>")
time.sleep(2)
# start testpmd on vf1
- self.session_third.send_expect("%s -c 0x1e000000 -n 4 --socket-mem 1024,1024 --legacy-mem -w %s --file-prefix=vf2 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
+ self.session_third.send_expect("%s -c 0x1e000000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf2 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
self.session_third.send_expect("set fwd rxonly", "testpmd>")
self.session_third.send_expect("set verbose 1", "testpmd>")
self.session_third.send_expect("start", "testpmd>")
@@ -1539,19 +1539,19 @@ class TestGeneric_flow_api(TestCase):
"fortville_spirit_single", "fortpark_TLV","fortpark_BASE-T", "foxville", "carlsville"]):
self.setup_env()
# start testpmd on pf
- self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
self.dut.send_expect("start", "testpmd> ", 120)
time.sleep(2)
# start testpmd on vf0
- self.session_secondary.send_expect("%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -w %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
+ self.session_secondary.send_expect("%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
self.session_secondary.send_expect("set fwd rxonly", "testpmd>")
self.session_secondary.send_expect("set verbose 1", "testpmd>")
self.session_secondary.send_expect("start", "testpmd>")
time.sleep(2)
# start testpmd on vf1
- self.session_third.send_expect("%s -c 0x1e000000 -n 4 --socket-mem 1024,1024 --legacy-mem -w %s --file-prefix=vf2 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
+ self.session_third.send_expect("%s -c 0x1e000000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf2 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
self.session_third.send_expect("set fwd rxonly", "testpmd>")
self.session_third.send_expect("set verbose 1", "testpmd>")
self.session_third.send_expect("start", "testpmd>")
@@ -1685,17 +1685,17 @@ class TestGeneric_flow_api(TestCase):
if (self.nic in ["fortville_eagle", "fortville_25g", "fortville_spirit","columbiaville_25g","columbiaville_100g",
"fortville_spirit_single", "fortpark_TLV","fortpark_BASE-T", "carlsville"]):
self.setup_env()
- self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
self.dut.send_expect("start", "testpmd> ", 120)
time.sleep(2)
- self.session_secondary.send_expect("%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -w %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
+ self.session_secondary.send_expect("%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
self.session_secondary.send_expect("set fwd rxonly", "testpmd>")
self.session_secondary.send_expect("set verbose 1", "testpmd>")
self.session_secondary.send_expect("start", "testpmd>")
time.sleep(2)
- self.session_third.send_expect("%s -c 0x1e000000 -n 4 --socket-mem 1024,1024 --legacy-mem -w %s --file-prefix=vf2 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
+ self.session_third.send_expect("%s -c 0x1e000000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf2 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
self.session_third.send_expect("set fwd rxonly", "testpmd>")
self.session_third.send_expect("set verbose 1", "testpmd>")
self.session_third.send_expect("start", "testpmd>")
@@ -1850,7 +1850,7 @@ class TestGeneric_flow_api(TestCase):
# i40e
if (self.nic in ["fortville_eagle", "fortville_25g", "fortville_spirit", "carlsville",
"fortville_spirit_single", "fortpark_TLV","fortpark_BASE-T","columbiaville_25g","columbiaville_100g"]):
- self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=pf" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=pf" % self.pf_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
self.dut.send_expect("start", "testpmd> ", 120)
@@ -1954,7 +1954,7 @@ class TestGeneric_flow_api(TestCase):
self.dut.send_expect("quit", "# ")
time.sleep(2)
- self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=pf --socket-mem 1024,1024" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=pf --socket-mem 1024,1024" % self.pf_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
self.dut.send_expect("start", "testpmd> ", 120)
@@ -2306,18 +2306,18 @@ class TestGeneric_flow_api(TestCase):
"%s nic not support tunnel vxlan filter" % self.nic)
self.setup_env()
- self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci)
self.dut.send_expect("rx_vxlan_port add 4789 0", "testpmd> ", 120)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
self.dut.send_expect("start", "testpmd> ", 120)
time.sleep(2)
- self.session_secondary.send_expect("%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -w %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
+ self.session_secondary.send_expect("%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
self.session_secondary.send_expect("set fwd rxonly", "testpmd>")
self.session_secondary.send_expect("set verbose 1", "testpmd>")
self.session_secondary.send_expect("start", "testpmd>")
time.sleep(2)
- self.session_third.send_expect("%s -c 0x1e000000 -n 4 --socket-mem 1024,1024 --legacy-mem -w %s --file-prefix=vf2 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
+ self.session_third.send_expect("%s -c 0x1e000000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf2 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
self.session_third.send_expect("set fwd rxonly", "testpmd>")
self.session_third.send_expect("set verbose 1", "testpmd>")
self.session_third.send_expect("start", "testpmd>")
@@ -2377,17 +2377,17 @@ class TestGeneric_flow_api(TestCase):
"%s nic not support tunnel nvgre filter" % self.nic)
self.setup_env()
- self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-w %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci)
+ self.pmdout.start_testpmd("%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE+1, MAX_QUEUE+1), "-a %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 120)
self.dut.send_expect("set verbose 1", "testpmd> ", 120)
self.dut.send_expect("start", "testpmd> ", 120)
time.sleep(2)
- self.session_secondary.send_expect("%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -w %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
+ self.session_secondary.send_expect("%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[0].pci), "testpmd>", 120)
self.session_secondary.send_expect("set fwd rxonly", "testpmd>")
self.session_secondary.send_expect("set verbose 1", "testpmd>")
self.session_secondary.send_expect("start", "testpmd>")
time.sleep(2)
- self.session_third.send_expect("%s -c 0x1e000000 -n 4 --socket-mem 1024,1024 --legacy-mem -w %s --file-prefix=vf2 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
+ self.session_third.send_expect("%s -c 0x1e000000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf2 -- -i --rxq=4 --txq=4 --disable-rss" % (self.app_path, self.sriov_vfs_port[1].pci), "testpmd>", 120)
self.session_third.send_expect("set fwd rxonly", "testpmd>")
self.session_third.send_expect("set verbose 1", "testpmd>")
self.session_third.send_expect("start", "testpmd>")
diff --git a/tests/TestSuite_iavf.py b/tests/TestSuite_iavf.py
index adaa130..7c5ace3 100644
--- a/tests/TestSuite_iavf.py
+++ b/tests/TestSuite_iavf.py
@@ -114,7 +114,7 @@ class TestIavf(TestCase):
# start testpmd for pf
self.dut_testpmd = PmdOutput(self.dut)
- host_eal_param = '-w %s -w %s' % (self.pf_pci0, self.pf_pci1)
+ host_eal_param = '-a %s -a %s' % (self.pf_pci0, self.pf_pci1)
self.dut_testpmd.start_testpmd(
"Default", "--rxq=4 --txq=4 --port-topology=chained", eal_param=host_eal_param)
diff --git a/tests/TestSuite_iavf_package_driver_error_handle.py b/tests/TestSuite_iavf_package_driver_error_handle.py
index d155afc..c180c5a 100644
--- a/tests/TestSuite_iavf_package_driver_error_handle.py
+++ b/tests/TestSuite_iavf_package_driver_error_handle.py
@@ -98,7 +98,7 @@ class Testiavf_package_and_driver_check(TestCase):
self.eal_param = ""
if safe_mode_support == "true":
for i in range(len(self.dut_ports)):
- self.eal_param = self.eal_param + "-w %s,safe-mode-support=1 " % self.dut.ports_info[i]['pci']
+ self.eal_param = self.eal_param + "-a %s,safe-mode-support=1 " % self.dut.ports_info[i]['pci']
out = self.dut_testpmd.start_testpmd("all", "--nb-cores=8 --rxq=%s --txq=%s --port-topology=chained" % (self.PF_QUEUE, self.PF_QUEUE), eal_param=self.eal_param)
if ice_pkg == "false":
if safe_mode_support == "true":
--git a/tests/TestSuite_ixgbe_vf_get_extra_queue_information.py b/tests/TestSuite_ixgbe_vf_get_extra_queue_information.py
index eeef6c7..4f92888 100644
--- a/tests/TestSuite_ixgbe_vf_get_extra_queue_information.py
+++ b/tests/TestSuite_ixgbe_vf_get_extra_queue_information.py
@@ -231,7 +231,7 @@ class TestIxgbeVfGetExtraInfo(TestCase):
# start testpmd with PF on the host
self.dut_testpmd = PmdOutput(self.dut)
self.dut_testpmd.start_testpmd(
- "%s" % self.cores, "--rxq=4 --txq=4 --nb-cores=4", "-w %s" % self.pf_pci)
+ "%s" % self.cores, "--rxq=4 --txq=4 --nb-cores=4", "-a %s" % self.pf_pci)
self.dut_testpmd.execute_cmd("port stop 0")
self.dut_testpmd.execute_cmd("port config 0 dcb vt on 4 pfc off")
self.dut_testpmd.execute_cmd("port start 0")
@@ -257,7 +257,7 @@ class TestIxgbeVfGetExtraInfo(TestCase):
# start testpmd with PF on the host
self.dut_testpmd = PmdOutput(self.dut)
self.dut_testpmd.start_testpmd(
- "%s" % self.cores, "--rxq=2 --txq=2 --nb-cores=2", "-w %s" % self.pf_pci)
+ "%s" % self.cores, "--rxq=2 --txq=2 --nb-cores=2", "-a %s" % self.pf_pci)
self.dut_testpmd.execute_cmd("start")
time.sleep(5)
self.setup_vm_env()
diff --git a/tests/TestSuite_l2fwd.py b/tests/TestSuite_l2fwd.py
index 091ac09..2c44858 100644
--- a/tests/TestSuite_l2fwd.py
+++ b/tests/TestSuite_l2fwd.py
@@ -175,7 +175,7 @@ class TestL2fwd(TestCase):
eal_params = self.dut.create_eal_parameters(cores=cores)
eal_param = ""
for i in ports:
- eal_param += " -w %s" % self.dut.ports_info[i]['pci']
+ eal_param += " -a %s" % self.dut.ports_info[i]['pci']
for frame_size in self.frame_sizes:
diff --git a/tests/TestSuite_l2tp_esp_coverage.py b/tests/TestSuite_l2tp_esp_coverage.py
index 10520c8..96378db 100644
--- a/tests/TestSuite_l2tp_esp_coverage.py
+++ b/tests/TestSuite_l2tp_esp_coverage.py
@@ -153,7 +153,7 @@ class L2tpEspCoverage(TestCase):
param_str = " --rxq=16 --txq=16 --port-topology=loop --enable-rx-cksum "
else:
param_str = " --rxq=16 --txq=16 --port-topology=loop "
- self.pmd_output.start_testpmd(cores="1S/8C/1T", param=param_str, eal_param="-w %s" % port_pci)
+ self.pmd_output.start_testpmd(cores="1S/8C/1T", param=param_str, eal_param="-a %s" % port_pci)
self.dut.send_expect("set fwd rxonly", "testpmd> ", 15)
self.dut.send_expect("set verbose 1", "testpmd> ", 15)
diff --git a/tests/TestSuite_malicious_driver_event_indication.py b/tests/TestSuite_malicious_driver_event_indication.py
index 8b446cf..c163c47 100644
--- a/tests/TestSuite_malicious_driver_event_indication.py
+++ b/tests/TestSuite_malicious_driver_event_indication.py
@@ -252,9 +252,9 @@ class TestSuiteMaliciousDrvEventIndication(TestCase):
# get whitelist and cores
socket = self.dut.get_numa_id(self.dut_ports[0])
corelist = self.dut.get_core_list("1S/6C/1T", socket=socket)[2:]
- self.pf_pmd_whitelist = '-w ' + self.vf_ports_info[0].get('pf_pci')
+ self.pf_pmd_whitelist = '-a ' + self.vf_ports_info[0].get('pf_pci')
self.pf_pmd_cores = corelist[:2]
- self.vf_pmd_allowlst = '-w ' + self.vf_ports_info[0].get('vfs_pci')[0]
+ self.vf_pmd_allowlst = '-a ' + self.vf_ports_info[0].get('vfs_pci')[0]
self.vf_pmd_cores = corelist[2:]
def init_params(self):
diff --git a/tests/TestSuite_multiprocess.py b/tests/TestSuite_multiprocess.py
index 8752a72..c6f5957 100644
--- a/tests/TestSuite_multiprocess.py
+++ b/tests/TestSuite_multiprocess.py
@@ -84,7 +84,7 @@ class TestMultiprocess(TestCase):
self.eal_param = ""
for i in self.dut_ports:
- self.eal_param += " -w %s" % self.dut.ports_info[i]['pci']
+ self.eal_param += " -a %s" % self.dut.ports_info[i]['pci']
self.eal_para = self.dut.create_eal_parameters(cores='1S/2C/1T')
# start new session to run secondary
@@ -266,7 +266,7 @@ class TestMultiprocess(TestCase):
for index in range(len(coreList)):
dut_new_session = self.dut.new_session()
dutSessionList.append(dut_new_session)
- # add -w option when tester and dut in same server
+ # add -a option when tester and dut in same server
dut_new_session.send_expect(
self.app_symmetric_mp + " -c %s --proc-type=auto %s -- -p %s --num-procs=%d --proc-id=%d" % (
utils.create_mask([coreList[index]]), self.eal_param, portMask, execution['nprocs'], index), "Finished Process Init")
@@ -324,7 +324,7 @@ class TestMultiprocess(TestCase):
# get core with socket parameter to specified which core dut used when tester and dut in same server
coreMask = utils.create_mask(self.dut.get_core_list('1S/1C/1T', socket=self.socket))
portMask = utils.create_mask(self.dut_ports)
- # specified mp_server core and add -w option when tester and dut in same server
+ # specified mp_server core and add -a option when tester and dut in same server
self.dut.send_expect(self.app_mp_server + " -n %d -c %s %s -- -p %s -n %d" % (
self.dut.get_memory_channels(), coreMask, self.eal_param, portMask, execution['nprocs']), "Finished Process Init", 20)
self.dut.send_expect("^Z", "\r\n")
diff --git a/tests/TestSuite_nic_single_core_perf.py b/tests/TestSuite_nic_single_core_perf.py
index 4ccc04a..c247ae7 100644
--- a/tests/TestSuite_nic_single_core_perf.py
+++ b/tests/TestSuite_nic_single_core_perf.py
@@ -220,7 +220,7 @@ class TestNicSingleCorePerf(TestCase):
# ports allowlist
eal_para = ""
for i in range(port_num):
- eal_para += " -w " + self.dut.ports_info[i]['pci']
+ eal_para += " -a " + self.dut.ports_info[i]['pci']
port_mask = utils.create_mask(self.dut_ports)
diff --git a/tests/TestSuite_performance_thread.py b/tests/TestSuite_performance_thread.py
index 459d9a1..0928df8 100644
--- a/tests/TestSuite_performance_thread.py
+++ b/tests/TestSuite_performance_thread.py
@@ -189,7 +189,7 @@ class TestPerformanceThread(TestCase):
self.test_results["data"] = []
eal_param = ""
for i in valports:
- eal_param += " -w %s" % self.dut.ports_info[i]['pci']
+ eal_param += " -a %s" % self.dut.ports_info[i]['pci']
for cores in self.nb_cores:
core_list, core_mask = self.create_cores(cores)
diff --git a/tests/TestSuite_pmd.py b/tests/TestSuite_pmd.py
index afc36d9..8253c43 100644
--- a/tests/TestSuite_pmd.py
+++ b/tests/TestSuite_pmd.py
@@ -397,7 +397,7 @@ class TestPmd(TestCase):
eal_opts = ""
for port in self.dut_ports:
- eal_opts += "-w %s,scalar_enable=1 "%(self.dut.get_port_pci(self.dut_ports[port]))
+ eal_opts += "-a %s,scalar_enable=1 "%(self.dut.get_port_pci(self.dut_ports[port]))
self.pmdout.start_testpmd("1S/2C/1T", "--portmask=%s" % port_mask, eal_param = eal_opts, socket=self.ports_socket)
diff --git a/tests/TestSuite_port_representor.py b/tests/TestSuite_port_representor.py
index e2fe0dc..f169473 100644
--- a/tests/TestSuite_port_representor.py
+++ b/tests/TestSuite_port_representor.py
@@ -104,14 +104,14 @@ class TestPortRepresentor(TestCase):
self.vf_flag = 0
def testpmd_pf(self):
- self.pmdout_pf.start_testpmd("Default", eal_param="-w %s,representor=0-1" % self.pf_pci, param="--port-topology=chained --total-num-mbufs=120000")
+ self.pmdout_pf.start_testpmd("Default", eal_param="-a %s,representor=0-1" % self.pf_pci, param="--port-topology=chained --total-num-mbufs=120000")
def testpmd_vf0(self):
- self.out_vf0 = self.pmdout_vf0.start_testpmd("Default", eal_param="-w %s --file-prefix testpmd-vf0" % self.vfs_pci[0], param="--total-num-mbufs=120000")
+ self.out_vf0 = self.pmdout_vf0.start_testpmd("Default", eal_param="-a %s --file-prefix testpmd-vf0" % self.vfs_pci[0], param="--total-num-mbufs=120000")
self.vf0_mac = self.pmdout_vf0.get_port_mac(0)
def testpmd_vf1(self):
- self.out_vf1 = self.pmdout_vf1.start_testpmd("Default", eal_param="-w %s --file-prefix testpmd-vf1" % self.vfs_pci[1], param="--total-num-mbufs=120000")
+ self.out_vf1 = self.pmdout_vf1.start_testpmd("Default", eal_param="-a %s --file-prefix testpmd-vf1" % self.vfs_pci[1], param="--total-num-mbufs=120000")
self.vf1_mac = self.pmdout_vf1.get_port_mac(0)
def check_port_stats(self):
diff --git a/tests/TestSuite_pvp_virtio_user_multi_queues_port_restart.py b/tests/TestSuite_pvp_virtio_user_multi_queues_port_restart.py
index eefc5bb..841d4ca 100644
--- a/tests/TestSuite_pvp_virtio_user_multi_queues_port_restart.py
+++ b/tests/TestSuite_pvp_virtio_user_multi_queues_port_restart.py
@@ -96,7 +96,7 @@ class TestPVPVirtioUserMultiQueuesPortRestart(TestCase):
vdev = "'net_vhost0,iface=vhost-net,queues=2,client=0'"
param = "--nb-cores=2 --rxq={} --txq={} --rss-ip".format(self.queue_number, self.queue_number)
self.vhost_pmd.start_testpmd(cores=self.core_list[2:5], param=param, \
- eal_param="-w {} --file-prefix=vhost --vdev {}".format(self.pci_info, vdev))
+ eal_param="-a {} --file-prefix=vhost --vdev {}".format(self.pci_info, vdev))
self.vhost_pmd.execute_cmd("set fwd mac", "testpmd> ", 120)
self.vhost_pmd.execute_cmd("start", "testpmd> ", 120)
diff --git a/tests/TestSuite_qinq_filter.py b/tests/TestSuite_qinq_filter.py
index e19fe0f..9e1671c 100644
--- a/tests/TestSuite_qinq_filter.py
+++ b/tests/TestSuite_qinq_filter.py
@@ -217,13 +217,13 @@ class TestQinqFilter(TestCase):
self.dut.send_expect(r'flow create 0 ingress pattern eth / vlan tci is 3 / vlan tci is 4094 / end actions pf / queue index 1 / end', "testpmd> ")
vf0_session.send_expect(r'%s -c %s -n 4 \
- --socket-mem=1024,1024 --file-prefix=vf0 -w %s -- -i --port-topology=loop \
+ --socket-mem=1024,1024 --file-prefix=vf0 -a %s -- -i --port-topology=loop \
--rxq=4 --txq=4 --disable-rss'
% (self.path, self.coreMask, vf_list[0]),
"testpmd> ", 30)
vf1_session.send_expect(r'%s -c %s -n 4 \
- --socket-mem=1024,1024 --file-prefix=vf1 -w %s -- -i --port-topology=loop \
+ --socket-mem=1024,1024 --file-prefix=vf1 -a %s -- -i --port-topology=loop \
--rxq=4 --txq=4 --disable-rss'
% (self.path, self.coreMask, vf_list[1]),
"testpmd>", 30)
@@ -289,13 +289,13 @@ class TestQinqFilter(TestCase):
self.dut.send_expect('vlan set outer tpid 0x88a8 0', "testpmd")
vf0_session.send_expect(r'%s -c %s -n 4 \
- --socket-mem=1024,1024 --file-prefix=vf0 -w %s -- -i --port-topology=loop \
+ --socket-mem=1024,1024 --file-prefix=vf0 -a %s -- -i --port-topology=loop \
--rxq=4 --txq=4 --disable-rss'
% (self.path, self.coreMask, vf_list[0]),
"testpmd> ", 30)
vf1_session.send_expect(r'%s -c %s -n 4 \
- --socket-mem=1024,1024 --file-prefix=vf1 -w %s -- -i --port-topology=loop \
+ --socket-mem=1024,1024 --file-prefix=vf1 -a %s -- -i --port-topology=loop \
--rxq=4 --txq=4 --disable-rss'
% (self.path, self.coreMask, vf_list[1]),
"testpmd>", 30)
diff --git a/tests/TestSuite_qos_api.py b/tests/TestSuite_qos_api.py
index da2c544..143598f 100644
--- a/tests/TestSuite_qos_api.py
+++ b/tests/TestSuite_qos_api.py
@@ -62,7 +62,7 @@ class TestQosApi(TestCase):
# each flow to 200Mbps
self.bps = 200000000
self.bps_rate = [0, 0.1]
- self.eal_param = ' --master-lcore=1'
+ self.eal_param = ' --main-lcore=1'
# Verify that enough threads are available
cores = self.dut.get_core_list("1S/1C/1T")
self.verify(cores is not None, "Insufficient cores for speed testing")
diff --git a/tests/TestSuite_runtime_vf_queue_number.py b/tests/TestSuite_runtime_vf_queue_number.py
index 1d7d536..0b31642 100644
--- a/tests/TestSuite_runtime_vf_queue_number.py
+++ b/tests/TestSuite_runtime_vf_queue_number.py
@@ -219,10 +219,10 @@ class TestRuntimeVfQn(TestCase):
"""
valid_qn = (2, 4, 8,)
for qn in valid_qn:
- host_eal_param = '-w %s,queue-num-per-vf=%d --file-prefix=test1 --socket-mem 1024,1024' % (self.pf_pci, qn)
+ host_eal_param = '-a %s,queue-num-per-vf=%d --file-prefix=test1 --socket-mem 1024,1024' % (self.pf_pci, qn)
self.host_testpmd.start_testpmd(self.pmdout.default_cores, param='', eal_param=host_eal_param)
- gest_eal_param = '-w %s --file-prefix=test2' % self.vm_dut_0.ports_info[0]['pci']
+ gest_eal_param = '-a %s --file-prefix=test2' % self.vm_dut_0.ports_info[0]['pci']
self.vm0_testpmd = PmdOutput(self.vm_dut_0)
self.vm0_testpmd.start_testpmd(self.pmdout.default_cores, eal_param=gest_eal_param, param='')
guest_cmds = self.testpmd_config_cmd_list(qn)
@@ -253,7 +253,7 @@ class TestRuntimeVfQn(TestCase):
:return:
"""
for invalid_qn in (0, 3, 5, 6, 7, 9, 11, 15, 17, 25,):
- eal_param = '-w %s,queue-num-per-vf=%d --file-prefix=test1 --socket-mem 1024,1024' % (self.pf_pci, invalid_qn)
+ eal_param = '-a %s,queue-num-per-vf=%d --file-prefix=test1 --socket-mem 1024,1024' % (self.pf_pci, invalid_qn)
testpmd_out = self.host_testpmd.start_testpmd(self.pmdout.default_cores, param='', eal_param=eal_param)
self.verify("it must be power of 2 and equal or less than 16" in testpmd_out, "there is no 'Wrong VF queue number = 0' logs.")
self.dut.send_expect("quit", "# ")
@@ -263,10 +263,10 @@ class TestRuntimeVfQn(TestCase):
Test case 3: set valid VF queue number in testpmd command-line options
:return:
"""
- host_eal_param = '-w %s --file-prefix=test1 --socket-mem 1024,1024' % self.pf_pci
+ host_eal_param = '-a %s --file-prefix=test1 --socket-mem 1024,1024' % self.pf_pci
self.host_testpmd.start_testpmd(self.pmdout.default_cores, param='', eal_param=host_eal_param)
- gest_eal_param = '-w %s --file-prefix=test2' % self.vm_dut_0.ports_info[0]['pci']
+ gest_eal_param = '-a %s --file-prefix=test2' % self.vm_dut_0.ports_info[0]['pci']
self.vm0_testpmd = PmdOutput(self.vm_dut_0)
for valid_qn in range(1, 17):
count = valid_qn * 10
@@ -292,9 +292,9 @@ class TestRuntimeVfQn(TestCase):
Test case 4: set invalid VF queue number in testpmd command-line options
:return:
"""
- host_eal_param = '-w %s --file-prefix=test1 --socket-mem 1024,1024' % self.pf_pci
+ host_eal_param = '-a %s --file-prefix=test1 --socket-mem 1024,1024' % self.pf_pci
self.host_testpmd.start_testpmd(self.pmdout.default_cores, param='', eal_param=host_eal_param)
- gest_eal_param = '-w %s --file-prefix=test2' % self.vm_dut_0.ports_info[0]['pci']
+ gest_eal_param = '-a %s --file-prefix=test2' % self.vm_dut_0.ports_info[0]['pci']
self.vm0_testpmd = PmdOutput(self.vm_dut_0)
app_name = self.dut.apps_name['test-pmd']
@@ -312,10 +312,10 @@ class TestRuntimeVfQn(TestCase):
Test case 5: set valid VF queue number with testpmd function command
:return:
"""
- host_eal_param = '-w %s --file-prefix=test1 --socket-mem 1024,1024' % self.pf_pci
+ host_eal_param = '-a %s --file-prefix=test1 --socket-mem 1024,1024' % self.pf_pci
self.host_testpmd.start_testpmd(self.pmdout.default_cores, param='', eal_param=host_eal_param)
- gest_eal_param = '-w %s --file-prefix=test2' % self.vm_dut_0.ports_info[0]['pci']
+ gest_eal_param = '-a %s --file-prefix=test2' % self.vm_dut_0.ports_info[0]['pci']
self.vm0_testpmd = PmdOutput(self.vm_dut_0)
self.vm0_testpmd.start_testpmd(self.pmdout.default_cores, eal_param=gest_eal_param, param='')
for valid_qn in range(1, 17):
@@ -343,9 +343,9 @@ class TestRuntimeVfQn(TestCase):
expect_str = ["Warning: Either rx or tx queues should be non zero",
"Fail: input rxq (257) can't be greater than max_rx_queues (256) of port 0",
"Fail: input txq (257) can't be greater than max_tx_queues (256) of port 0"]
- host_eal_param = '-w %s --file-prefix=test1 --socket-mem 1024,1024' % self.pf_pci
+ host_eal_param = '-a %s --file-prefix=test1 --socket-mem 1024,1024' % self.pf_pci
self.host_testpmd.start_testpmd(self.pmdout.default_cores, param='', eal_param=host_eal_param)
- gest_eal_param = '-w %s --file-prefix=test2' % self.vm_dut_0.ports_info[0]['pci']
+ gest_eal_param = '-a %s --file-prefix=test2' % self.vm_dut_0.ports_info[0]['pci']
self.vm0_testpmd = PmdOutput(self.vm_dut_0)
self.vm0_testpmd.start_testpmd(self.pmdout.default_cores, eal_param=gest_eal_param, param='')
for invalid_qn in [0, 257]:
@@ -361,7 +361,7 @@ class TestRuntimeVfQn(TestCase):
Test case 7: Reserve VF queue number when VF bind to kernel driver
:return:
"""
- host_eal_param = '-w %s,queue-num-per-vf=2 --file-prefix=test1 --socket-mem 1024,1024' % self.pf_pci
+ host_eal_param = '-a %s,queue-num-per-vf=2 --file-prefix=test1 --socket-mem 1024,1024' % self.pf_pci
self.host_testpmd.start_testpmd(self.pmdout.default_cores, param='', eal_param=host_eal_param)
self.vm0_testpmd = PmdOutput(self.vm_dut_0)
self.vm_dut_0.restore_interfaces()
diff --git a/tests/TestSuite_runtime_vf_queue_number_maxinum.py b/tests/TestSuite_runtime_vf_queue_number_maxinum.py
index 8e93c20..72644ae 100644
--- a/tests/TestSuite_runtime_vf_queue_number_maxinum.py
+++ b/tests/TestSuite_runtime_vf_queue_number_maxinum.py
@@ -145,7 +145,7 @@ class TestRuntimeVfQnMaxinum(TestCase):
for each vf, when requested queues exceed 4 queues, it need to realloc queues
in the left queues, the reserved queues generally can't be reused.
"""
- pf_eal_param = '-w {} --file-prefix=test1 --socket-mem 1024,1024'.format(self.pf_pci)
+ pf_eal_param = '-a {} --file-prefix=test1 --socket-mem 1024,1024'.format(self.pf_pci)
self.pf_pmdout.start_testpmd(self.pf_pmdout.default_cores, eal_param=pf_eal_param)
vf1_allow_index = 0
vf1_allow_list = ''
@@ -180,9 +180,9 @@ class TestRuntimeVfQnMaxinum(TestCase):
vf_pcis.append(vf.pci)
vf_pcis.sort()
for pci in vf_pcis[:vf1_allow_index]:
- vf1_allow_list = vf1_allow_list + '-w {} '.format(pci)
+ vf1_allow_list = vf1_allow_list + '-a {} '.format(pci)
for pci in vf_pcis[vf1_allow_index:vf1_allow_index+vf3_allow_index]:
- vf3_allow_list = vf3_allow_list + '-w {} '.format(pci)
+ vf3_allow_list = vf3_allow_list + '-a {} '.format(pci)
self.logger.info('vf1 allow list: {}'.format(vf1_allow_list))
self.logger.info('vf3_allow_list: {}'.format(vf3_allow_list))
@@ -196,7 +196,7 @@ class TestRuntimeVfQnMaxinum(TestCase):
self.start_testpmd_multiple_times(self.vf3_pmdout, '--rxq=16 --txq=16', vf3_eal_param)
if vf2_queue_number > 0:
- vf2_eal_param = '-w {} --file-prefix=vf2 --socket-mem 1024,1024'.format(
+ vf2_eal_param = '-a {} --file-prefix=vf2 --socket-mem 1024,1024'.format(
vf_pcis[vf1_allow_index+vf3_allow_index])
self.vf2_pmdout.start_testpmd(self.vf2_pmdout.default_cores, param='--rxq={0} --txq={0}'.format(
vf2_queue_number), eal_param=vf2_eal_param)
@@ -265,7 +265,7 @@ class TestRuntimeVfQnMaxinum(TestCase):
Testpmd should not crash.
"""
# test queue-number-per-vf exceed hardware maximum
- pf_eal_param = '-w {},queue-num-per-vf=16 --file-prefix=test1 --socket-mem 1024,1024'.format(self.pf_pci)
+ pf_eal_param = '-a {},queue-num-per-vf=16 --file-prefix=test1 --socket-mem 1024,1024'.format(self.pf_pci)
out = self.pf_pmdout.start_testpmd(self.pf_pmdout.default_cores, eal_param=pf_eal_param)
self.verify('exceeds the hardware maximum' in out, 'queue number per vf limited error')
out = self.pf_pmdout.execute_cmd('start')
diff --git a/tests/TestSuite_softnic.py b/tests/TestSuite_softnic.py
index e4934ff..2d486a9 100644
--- a/tests/TestSuite_softnic.py
+++ b/tests/TestSuite_softnic.py
@@ -75,7 +75,7 @@ class TestSoftnic(TestCase):
self.dut.session.copy_file_to(self.firmware, self.root_path)
self.dut.session.copy_file_to(self.tm_firmware, self.root_path)
self.dut.session.copy_file_to(self.nat_firmware, self.root_path)
- self.eal_param = " -w %s" % self.dut.ports_info[0]['pci']
+ self.eal_param = " -a %s" % self.dut.ports_info[0]['pci']
self.path = self.dut.apps_name['test-pmd']
self.pmdout = PmdOutput(self.dut)
# get dts output path
diff --git a/tests/TestSuite_sriov_kvm.py b/tests/TestSuite_sriov_kvm.py
index 1a0e3a5..7926516 100644
--- a/tests/TestSuite_sriov_kvm.py
+++ b/tests/TestSuite_sriov_kvm.py
@@ -344,7 +344,7 @@ class TestSriovKvm(TestCase):
if driver == 'igb_uio':
# start testpmd with the two VFs on the host
self.host_testpmd = PmdOutput(self.dut)
- eal_param = '-w %s ' % self.dut.ports_info[0]['pci']
+ eal_param = '-a %s ' % self.dut.ports_info[0]['pci']
self.host_testpmd.start_testpmd(
"1S/2C/2T", "--rxq=4 --txq=4", eal_param=eal_param)
self.host_testpmd.execute_cmd('set fwd rxonly')
diff --git a/tests/TestSuite_stats_checks.py b/tests/TestSuite_stats_checks.py
index 58c5236..c5d1e48 100644
--- a/tests/TestSuite_stats_checks.py
+++ b/tests/TestSuite_stats_checks.py
@@ -301,5 +301,5 @@ class TestStatsChecks(TestCase):
self.vf_port = self.dut.ports_info[self.dut_ports[0]]["vfs_port"][0]
self.vf_port.bind_driver(driver="vfio-pci")
self.vf_port_pci = self.dut.ports_info[self.dut_ports[0]]['sriov_vfs_pci'][0]
- self.pmdout.start_testpmd('default', '--rxq=4 --txq=4', eal_param='-w %s' % self.vf_port_pci)
+ self.pmdout.start_testpmd('default', '--rxq=4 --txq=4', eal_param='-a %s' % self.vf_port_pci)
self.xstats_check(0, 0, if_vf=True)
diff --git a/tests/TestSuite_telemetry.py b/tests/TestSuite_telemetry.py
index 82a1d40..7c9b561 100644
--- a/tests/TestSuite_telemetry.py
+++ b/tests/TestSuite_telemetry.py
@@ -231,7 +231,7 @@ class TestTelemetry(TestCase):
if info['pci'] not in pci_addrs:
continue
self.used_ports.append(index)
- allow_list = ' '.join(['-w ' + pci_addr for pci_addr in pci_addrs])
+ allow_list = ' '.join(['-a ' + pci_addr for pci_addr in pci_addrs])
return allow_list
def start_telemetry_server(self, allowlist=None):
diff --git a/tests/TestSuite_unit_tests_event_timer.py b/tests/TestSuite_unit_tests_event_timer.py
index 22c316e..349c0cf 100644
--- a/tests/TestSuite_unit_tests_event_timer.py
+++ b/tests/TestSuite_unit_tests_event_timer.py
@@ -78,9 +78,9 @@ class TestUnitTestEventTimer(TestCase):
"""
if self.nic == "cavium_a063" or self.nic == "cavium_a064":
- self.dut.send_expect("./%s -n 1 -c %s -w %s,single_ws=1,tim_stats_ena=1" % (self.app_name, self.coremask, self.eventdev_device_bus_id), "R.*T.*E.*>.*>", 60)
+ self.dut.send_expect("./%s -n 1 -c %s -a %s,single_ws=1,tim_stats_ena=1" % (self.app_name, self.coremask, self.eventdev_device_bus_id), "R.*T.*E.*>.*>", 60)
elif self.nic == "cavium_a034":
- self.dut.send_expect("./%s -n 1 -c %s -w %s,timvf_stats=1" % (self.app_name, self.coremask, self.eventdev_timer_device_bus_id), "R.*T.*E.*>.*>", 60)
+ self.dut.send_expect("./%s -n 1 -c %s -a %s,timvf_stats=1" % (self.app_name, self.coremask, self.eventdev_timer_device_bus_id), "R.*T.*E.*>.*>", 60)
out = self.dut.send_expect("event_timer_adapter_test", "RTE>>", 300)
self.dut.send_expect("quit", "# ")
self.verify("Test OK" in out, "Test failed")
diff --git a/tests/TestSuite_vf_l3fwd.py b/tests/TestSuite_vf_l3fwd.py
index a9bc7fa..c881f69 100644
--- a/tests/TestSuite_vf_l3fwd.py
+++ b/tests/TestSuite_vf_l3fwd.py
@@ -128,7 +128,7 @@ class TestVfL3fwd(TestCase):
self.host_testpmd = PmdOutput(self.dut)
eal_param = '--socket-mem=1024,1024 --file-prefix=pf'
for i in valports:
- eal_param += ' -w %s' % self.dut.ports_info[i]['pci']
+ eal_param += ' -a %s' % self.dut.ports_info[i]['pci']
core_config = self.cores[:len(valports)]
self.host_testpmd.start_testpmd(core_config, "", eal_param=eal_param)
for i in valports:
@@ -256,7 +256,7 @@ class TestVfL3fwd(TestCase):
self.setup_vf_env(host_driver, vf_driver)
eal_param = ""
for i in valports:
- eal_param += " -w " + self.sriov_vfs_port[i][0].pci
+ eal_param += " -a " + self.sriov_vfs_port[i][0].pci
port_mask = utils.create_mask(self.dut_ports)
# for fvl40g, fvl25g, use 2c/2q per VF port for performance test ,
diff --git a/tests/TestSuite_vf_single_core_perf.py b/tests/TestSuite_vf_single_core_perf.py
index d40547c..53ee2da 100644
--- a/tests/TestSuite_vf_single_core_perf.py
+++ b/tests/TestSuite_vf_single_core_perf.py
@@ -225,7 +225,7 @@ class TestVfSingleCorePerf(TestCase):
# ports allowlist
eal_para = ""
for i in range(port_num):
- eal_para += " -w " + self.sriov_vfs_port[i][0].pci
+ eal_para += " -a " + self.sriov_vfs_port[i][0].pci
port_mask = utils.create_mask(self.dut_ports)
# parameters for application/testpmd
param = " --portmask=%s" % (port_mask)
diff --git a/tests/TestSuite_vm2vm_virtio_pmd.py b/tests/TestSuite_vm2vm_virtio_pmd.py
index 44f17a3..d730e9c 100644
--- a/tests/TestSuite_vm2vm_virtio_pmd.py
+++ b/tests/TestSuite_vm2vm_virtio_pmd.py
@@ -151,7 +151,7 @@ class TestVM2VMVirtioPMD(TestCase):
"""
# deal with ports
w_pci_list = []
- w_pci_list.append('-w %s,%s' % (virtio_net_pci, 'vectorized=1'))
+ w_pci_list.append('-a %s,%s' % (virtio_net_pci, 'vectorized=1'))
w_pci_str = ' '.join(w_pci_list)
if path_mode == "mergeable":
command = self.app_testpmd_path + " -c 0x3 -n 4 " + \
diff --git a/tests/TestSuite_vmdq.py b/tests/TestSuite_vmdq.py
index 6dae54a..1a170f8 100644
--- a/tests/TestSuite_vmdq.py
+++ b/tests/TestSuite_vmdq.py
@@ -113,7 +113,7 @@ class TestVmdq(TestCase):
port_mask = utils.create_mask(self.dut_ports)
eal_param = ""
for i in self.dut_ports:
- eal_param += " -w %s" % self.dut.ports_info[i]['pci']
+ eal_param += " -a %s" % self.dut.ports_info[i]['pci']
# Run the application
self.dut.send_expect("./%s -c %s -n 4 %s -- -p %s --nb-pools %s --enable-rss" %
(self.app_vmdq_path, core_mask, eal_param, port_mask, str(npools)), "reading queues", 120)
diff --git a/tests/TestSuite_vmdq_dcb.py b/tests/TestSuite_vmdq_dcb.py
index 2a248f8..20d03dd 100644
--- a/tests/TestSuite_vmdq_dcb.py
+++ b/tests/TestSuite_vmdq_dcb.py
@@ -107,7 +107,7 @@ class TestVmdqDcb(TestCase):
port_mask = utils.create_mask(self.dut_ports)
eal_param = ""
for i in self.dut_ports:
- eal_param += " -w %s" % self.dut.ports_info[i]['pci']
+ eal_param += " -a %s" % self.dut.ports_info[i]['pci']
# Run the application
app_name = self.dut.apps_name['vmdq_dcb']
command = app_name + "-c %s -n 4 %s -- -p %s --nb-pools %s --nb-tcs %s " \
diff --git a/tests/flexible_common.py b/tests/flexible_common.py
index 9df62ca..3a701db 100644
--- a/tests/flexible_common.py
+++ b/tests/flexible_common.py
@@ -340,7 +340,7 @@ class FlexibleRxdBase(object):
cmd = (
"-l 1,2,3 "
"-n {mem_channel} "
- "-w {pci},{param_type}=vxlan "
+ "-a {pci},{param_type}=vxlan "
"-- -i "
"{port_opt}").format(**{
'mem_channel': self.dut.get_memory_channels(),
@@ -364,7 +364,7 @@ class FlexibleRxdBase(object):
cmd = (
"-l 1,2,3 "
"-n {mem_channel} "
- "-w {pci} "
+ "-a {pci} "
"--log-level='ice,8' "
"-- -i "
"{port_opt}").format(**{
@@ -501,7 +501,7 @@ class FlexibleRxdBase(object):
def check_effect_replace_pkg_RXID_22_to_RXID_16(self):
self.logger.info("replace ice-1.3.7.0.pkg with RXID 16")
self.replace_pkg('os_default')
- out = self.__pmdout.start_testpmd(cores="1S/4C/1T", param='--rxq=64 --txq=64', eal_param=f"-w {self.__pci}")
+ out = self.__pmdout.start_testpmd(cores="1S/4C/1T", param='--rxq=64 --txq=64', eal_param=f"-a {self.__pci}")
self.verify("Fail to start port 0" in out, "RXID #16 not support start testpmd")
self.__pmdout.execute_cmd("quit", "# ")
self.replace_pkg('comms')
diff --git a/tests/perf_test_base.py b/tests/perf_test_base.py
index 02a73ee..00cf80c 100644
--- a/tests/perf_test_base.py
+++ b/tests/perf_test_base.py
@@ -743,7 +743,7 @@ class PerfTestBase(object):
self.__is_pmd_on = False
def __get_topo_option(self):
- port_num = len(re.findall('-w', self.__bin_ps_allow_list)) \
+ port_num = len(re.findall('-a', self.__bin_ps_allow_list)) \
if self.__bin_ps_allow_list else len(self.__valports)
return 'loop' if port_num == 1 else 'chained'
@@ -1410,16 +1410,16 @@ class PerfTestBase(object):
pci = self.dut.ports_info[port_index].get('pci')
if not pci:
continue
- allowlist += '-w {} '.format(pci)
+ allowlist += '-a {} '.format(pci)
else:
- allowlist = ''.join(['-w {} '.format(pci)
+ allowlist = ''.join(['-a {} '.format(pci)
for _, info in self.__vf_ports_info.items()
for pci in info.get('vfs_pci')])
return allowlist
def __get_host_testpmd_allowlist(self):
- allowlist = ''.join(['-w {} '.format(info.get('pf_pci'))
+ allowlist = ''.join(['-a {} '.format(info.get('pf_pci'))
for _, info in self.__vf_ports_info.items()])
return allowlist
--
1.8.3.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* [dts] [PATCH V1 2/3] test_plans/*: changed eal -w parameter to -a
2021-10-13 9:24 [dts] [PATCH V1 1/3] tests/*: changed eal -w parameter to -a Jun Dong
@ 2021-10-13 9:24 ` Jun Dong
2021-10-13 9:24 ` [dts] [PATCH V1 3/3] conf/*: " Jun Dong
1 sibling, 0 replies; 4+ messages in thread
From: Jun Dong @ 2021-10-13 9:24 UTC (permalink / raw)
To: dts; +Cc: PingX.Yu, weix.ling, junx.dong
- changed eal parameter -w to -a for all test plans
Signed-off-by: Jun Dong <junx.dong@intel.com>
---
test_plans/ABI_stable_test_plan.rst | 2 +-
test_plans/cloud_filter_with_l4_port_test_plan.rst | 4 +-
| 2 +-
| 2 +-
| 2 +-
| 4 +-
| 4 +-
test_plans/cvl_dcf_acl_filter_test_plan.rst | 20 +++---
test_plans/cvl_dcf_date_path_test_plan.rst | 4 +-
.../cvl_dcf_switch_filter_pppoe_test_plan.rst | 2 +-
test_plans/cvl_dcf_switch_filter_test_plan.rst | 10 +--
test_plans/cvl_fdir_test_plan.rst | 4 +-
test_plans/cvl_limit_value_test_test_plan.rst | 18 +++---
test_plans/cvl_switch_filter_pppoe_test_plan.rst | 4 +-
test_plans/cvl_switch_filter_test_plan.rst | 4 +-
test_plans/ddp_l2tpv3_test_plan.rst | 2 +-
test_plans/dpdk_hugetlbfs_mount_size_test_plan.rst | 18 +++---
...le_package_download_in_ice_driver_test_plan.rst | 4 +-
test_plans/eventdev_perf_test_plan.rst | 38 ++++++------
test_plans/eventdev_pipeline_perf_test_plan.rst | 38 ++++++------
test_plans/flexible_rxd_test_plan.rst | 24 ++++----
test_plans/floating_veb_test_plan.rst | 38 ++++++------
| 2 +-
test_plans/generic_flow_api_test_plan.rst | 36 +++++------
test_plans/iavf_flexible_descriptor_test_plan.rst | 22 +++----
.../iavf_package_driver_error_handle_test_plan.rst | 2 +-
test_plans/iavf_test_plan.rst | 10 +--
test_plans/inline_ipsec_test_plan.rst | 22 +++----
test_plans/ip_pipeline_test_plan.rst | 14 ++---
test_plans/ipsec_gw_and_library_test_plan.rst | 8 +--
test_plans/l2tp_esp_coverage_test_plan.rst | 12 ++--
test_plans/linux_modules_test_plan.rst | 4 +-
test_plans/macsec_for_ixgbe_test_plan.rst | 6 +-
...malicious_driver_event_indication_test_plan.rst | 10 +--
test_plans/pmd_test_plan.rst | 2 +-
test_plans/port_representor_test_plan.rst | 6 +-
test_plans/qinq_filter_test_plan.rst | 12 ++--
.../runtime_vf_queue_number_maxinum_test_plan.rst | 8 +--
test_plans/runtime_vf_queue_number_test_plan.rst | 28 ++++-----
test_plans/unit_tests_dump_test_plan.rst | 2 +-
test_plans/unit_tests_event_timer_test_plan.rst | 2 +-
test_plans/veb_switch_test_plan.rst | 30 ++++-----
test_plans/vf_l3fwd_test_plan.rst | 2 +-
test_plans/vf_macfilter_test_plan.rst | 8 +--
test_plans/vf_packet_rxtx_test_plan.rst | 4 +-
test_plans/vf_pf_reset_test_plan.rst | 6 +-
test_plans/vf_vlan_test_plan.rst | 2 +-
.../vhost_virtio_user_interrupt_test_plan.rst | 4 +-
test_plans/virtio_pvp_regression_test_plan.rst | 4 +-
test_plans/vm2vm_virtio_pmd_test_plan.rst | 72 +++++++++++-----------
50 files changed, 294 insertions(+), 294 deletions(-)
diff --git a/test_plans/ABI_stable_test_plan.rst b/test_plans/ABI_stable_test_plan.rst
index c15af72..16934c4 100644
--- a/test_plans/ABI_stable_test_plan.rst
+++ b/test_plans/ABI_stable_test_plan.rst
@@ -292,7 +292,7 @@ Build shared libraries, (just enable i40e pmd for testing)::
Run testpmd application refer to Common Test steps with ixgbe pmd NIC.::
- testpmd -c 0xf -n 4 -d <dpdk_2002> -w 18:00.0 -- -i
+ testpmd -c 0xf -n 4 -d <dpdk_2002> -a 18:00.0 -- -i
Test txonly::
diff --git a/test_plans/cloud_filter_with_l4_port_test_plan.rst b/test_plans/cloud_filter_with_l4_port_test_plan.rst
index da39c9a..ed2109e 100644
--- a/test_plans/cloud_filter_with_l4_port_test_plan.rst
+++ b/test_plans/cloud_filter_with_l4_port_test_plan.rst
@@ -49,7 +49,7 @@ Prerequisites
./usertools/dpdk-devbind.py --force --bind=vfio-pci 0000:81:00.0
4.Launch the testpmd::
- ./testpmd -l 0-3 -n 4 -w 81:00.0 --file-prefix=test -- -i --rxq=16 --txq=16 --disable-rss
+ ./testpmd -l 0-3 -n 4 -a 81:00.0 --file-prefix=test -- -i --rxq=16 --txq=16 --disable-rss
testpmd> set fwd rxonly
testpmd> set promisc all off
testpmd> set verbose 1
@@ -517,4 +517,4 @@ Test Case 3: NEGATIVE_TEST
create conflicted rules::
testpmd> flow create 0 ingress pattern eth / ipv4 / udp src is 156 / end actions pf / queue index 2 / end
- Verify rules can not create.
\ No newline at end of file
+ Verify rules can not create.
diff --git a/test_plans/cvl_advanced_iavf_rss_gtpu_test_plan.rst b/test_plans/cvl_advanced_iavf_rss_gtpu_test_plan.rst
index 909c722..7d99f77 100644
--- a/test_plans/cvl_advanced_iavf_rss_gtpu_test_plan.rst
+++ b/test_plans/cvl_advanced_iavf_rss_gtpu_test_plan.rst
@@ -213,7 +213,7 @@ Prerequisites
5. Launch the testpmd to configuration queue of rx and tx number 16 in DUT::
- testpmd>./x86_64-native-linuxapp-gcc/app/testpmd -c 0xff -n 4 -w 0000:18:01.0 -- -i --rxq=16 --txq=16
+ testpmd>./x86_64-native-linuxapp-gcc/app/testpmd -c 0xff -n 4 -a 0000:18:01.0 -- -i --rxq=16 --txq=16
testpmd>set fwd rxonly
testpmd>set verbose 1
diff --git a/test_plans/cvl_advanced_iavf_rss_test_plan.rst b/test_plans/cvl_advanced_iavf_rss_test_plan.rst
index 27c8792..0e0a810 100644
--- a/test_plans/cvl_advanced_iavf_rss_test_plan.rst
+++ b/test_plans/cvl_advanced_iavf_rss_test_plan.rst
@@ -349,7 +349,7 @@ Prerequisites
5. Launch the testpmd to configuration queue of rx and tx number 16 in DUT::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xff -n 4 -w 0000:18:01.0 -- -i --rxq=16 --txq=16
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xff -n 4 -a 0000:18:01.0 -- -i --rxq=16 --txq=16
testpmd>set fwd rxonly
testpmd>set verbose 1
diff --git a/test_plans/cvl_advanced_iavf_rss_vlan_esp_ah_l2tp_pfcp_test_plan.rst b/test_plans/cvl_advanced_iavf_rss_vlan_esp_ah_l2tp_pfcp_test_plan.rst
index b120986..b9229f1 100644
--- a/test_plans/cvl_advanced_iavf_rss_vlan_esp_ah_l2tp_pfcp_test_plan.rst
+++ b/test_plans/cvl_advanced_iavf_rss_vlan_esp_ah_l2tp_pfcp_test_plan.rst
@@ -119,7 +119,7 @@ Prerequisites
6. Launch the testpmd to configuration queue of rx and tx number 16 in DUT::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xff -n 4 -w 0000:18:01.0 -- -i --rxq=16 --txq=16
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xff -n 4 -a 0000:18:01.0 -- -i --rxq=16 --txq=16
testpmd>set fwd rxonly
testpmd>set verbose 1
diff --git a/test_plans/cvl_advanced_rss_pppoe_test_plan.rst b/test_plans/cvl_advanced_rss_pppoe_test_plan.rst
index e209e0d..829a94d 100644
--- a/test_plans/cvl_advanced_rss_pppoe_test_plan.rst
+++ b/test_plans/cvl_advanced_rss_pppoe_test_plan.rst
@@ -125,7 +125,7 @@ Prerequisites
5. Launch the testpmd in DUT for cases with toeplitz hash function::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:00.0 -- -i --rxq=16 --txq=16 --disable-rss
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:00.0 -- -i --rxq=16 --txq=16 --disable-rss
testpmd> port config 0 rss-hash-key ipv4 1b9d58a4b961d9cd1c56ad1621c3ad51632c16a5d16c21c3513d132c135d132c13ad1531c23a51d6ac49879c499d798a7d949c8a
testpmd> set fwd rxonly
testpmd> set verbose 1
@@ -133,7 +133,7 @@ Prerequisites
Launch testpmd for cases with symmetric_toeplitz and simple_xor hash function::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:00.0 -- -i --rxq=16 --txq=16
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:00.0 -- -i --rxq=16 --txq=16
6. on tester side, copy the layer python file to /root::
diff --git a/test_plans/cvl_advanced_rss_vlan_esp_ah_l2tp_pfcp_test_plan.rst b/test_plans/cvl_advanced_rss_vlan_esp_ah_l2tp_pfcp_test_plan.rst
index a0c232d..bc2db46 100644
--- a/test_plans/cvl_advanced_rss_vlan_esp_ah_l2tp_pfcp_test_plan.rst
+++ b/test_plans/cvl_advanced_rss_vlan_esp_ah_l2tp_pfcp_test_plan.rst
@@ -116,7 +116,7 @@ Prerequisites
5. Launch the testpmd in DUT for cases with toeplitz hash function::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:00.0 -- -i --rxq=16 --txq=16 --disable-rss
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:00.0 -- -i --rxq=16 --txq=16 --disable-rss
testpmd> port config 0 rss-hash-key ipv4 1b9d58a4b961d9cd1c56ad1621c3ad51632c16a5d16c21c3513d132c135d132c13ad1531c23a51d6ac49879c499d798a7d949c8a
testpmd> set fwd rxonly
testpmd> set verbose 1
@@ -124,7 +124,7 @@ Prerequisites
Launch testpmd for cases with symmetric_toeplitz and simple_xor hash function::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:00.0 -- -i --rxq=16 --txq=16
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:00.0 -- -i --rxq=16 --txq=16
6. on tester side, copy the layer python file to /root::
diff --git a/test_plans/cvl_dcf_acl_filter_test_plan.rst b/test_plans/cvl_dcf_acl_filter_test_plan.rst
index 378514d..c74dcb6 100644
--- a/test_plans/cvl_dcf_acl_filter_test_plan.rst
+++ b/test_plans/cvl_dcf_acl_filter_test_plan.rst
@@ -95,7 +95,7 @@ Prerequisites
9. Launch dpdk on VF0, and VF0 request DCF mode::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf -n 4 -w 0000:86:01.0,cap=dcf --file-prefix=vf0 --log-level="ice,7" -- -i
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf -n 4 -a 0000:86:01.0,cap=dcf --file-prefix=vf0 --log-level="ice,7" -- -i
testpmd> set fwd mac
testpmd> set verbose 1
testpmd> start
@@ -106,7 +106,7 @@ Prerequisites
10. Launch dpdk on VF1::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf0 -n 4 -w 86:01.1 --file-prefix=vf1 -- -i
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf0 -n 4 -a 86:01.1 --file-prefix=vf1 -- -i
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
@@ -118,7 +118,7 @@ Prerequisites
or launch one testpmd on VF0 and VF1::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf -n 4 -w 0000:86:01.0,cap=dcf -w 86:01.1 --file-prefix=vf0 --log-level="ice,7" -- -i
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf -n 4 -a 0000:86:01.0,cap=dcf -a 86:01.1 --file-prefix=vf0 --log-level="ice,7" -- -i
Common steps of basic cases
===========================
@@ -516,11 +516,11 @@ while we can create 256 ipv4-udp/ipv4-tcp/ipv4-sctp rules at most.
1. launch DPDK on VF0, request DCF mode::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xc -n 4 -w 86:01.0,cap=dcf -- -i --port-topology=loop
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xc -n 4 -a 86:01.0,cap=dcf -- -i --port-topology=loop
Launch dpdk on VF1::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf0 -n 4 -w 86:01.1 --file-prefix=vf1 -- -i
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf0 -n 4 -a 86:01.1 --file-prefix=vf1 -- -i
2. create a full mask rule, it's created as a switch rule::
@@ -592,11 +592,11 @@ Test Case 6: max entry number ipv4-other
========================================
1. launch DPDK on VF0, request DCF mode::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xc -n 4 -w 86:01.0,cap=dcf -- -i --port-topology=loop
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xc -n 4 -a 86:01.0,cap=dcf -- -i --port-topology=loop
Launch dpdk on VF1::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf0 -n 4 -w 86:01.1 --file-prefix=vf1 -- -i
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf0 -n 4 -a 86:01.1 --file-prefix=vf1 -- -i
2. create a full mask rule, it's created as a switch rule::
@@ -669,11 +669,11 @@ Test Case 7: max entry number combined patterns
===============================================
1. launch DPDK on VF0, request DCF mode::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xc -n 4 -w 86:01.0,cap=dcf -- -i --port-topology=loop
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xc -n 4 -a 86:01.0,cap=dcf -- -i --port-topology=loop
Launch dpdk on VF1::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf0 -n 4 -w 86:01.1 --file-prefix=vf1 -- -i
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf0 -n 4 -a 86:01.1 --file-prefix=vf1 -- -i
2. create 32 ipv4-other ACL rules::
@@ -912,7 +912,7 @@ Test Case 11: switch/acl/fdir/rss rules combination
===================================================
1. launch testpmd::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xc -n 4 -w 86:01.0,cap=dcf -w 86:01.1 --log-level="ice,7" -- -i --port-topology=loop --rxq=4 --txq=4
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xc -n 4 -a 86:01.0,cap=dcf -a 86:01.1 --log-level="ice,7" -- -i --port-topology=loop --rxq=4 --txq=4
2. create rules::
diff --git a/test_plans/cvl_dcf_date_path_test_plan.rst b/test_plans/cvl_dcf_date_path_test_plan.rst
index 380090f..a5f8bdf 100755
--- a/test_plans/cvl_dcf_date_path_test_plan.rst
+++ b/test_plans/cvl_dcf_date_path_test_plan.rst
@@ -17,7 +17,7 @@ Set a VF as trust ::
Launch dpdk on the VF, request DCF mode ::
./usertools/dpdk-devbind.py -b vfio-pci 18:01.0
- ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-10 -n 4 -w 18:01.0,cap=dcf --file-prefix=vf -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-10 -n 4 -a 18:01.0,cap=dcf --file-prefix=vf -- -i
Test Case: Launch DCF and do macfwd
@@ -200,4 +200,4 @@ Test Case: Measure performance of DCF interface
The steps are same to iAVF performance test, a slight difference on
launching testpmd devarg. DCF need cap=dcf option.
-Expect the performance is same to iAVF
\ No newline at end of file
+Expect the performance is same to iAVF
diff --git a/test_plans/cvl_dcf_switch_filter_pppoe_test_plan.rst b/test_plans/cvl_dcf_switch_filter_pppoe_test_plan.rst
index 0149b46..d781f73 100644
--- a/test_plans/cvl_dcf_switch_filter_pppoe_test_plan.rst
+++ b/test_plans/cvl_dcf_switch_filter_pppoe_test_plan.rst
@@ -201,7 +201,7 @@ Prerequisites
9. Launch dpdk on VF0 and VF1, and VF0 request DCF mode::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:01.0,cap=dcf -w 0000:18:01.1 -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:01.0,cap=dcf -a 0000:18:01.1 -- -i
testpmd> set portlist 1
testpmd> set fwd rxonly
testpmd> set verbose 1
diff --git a/test_plans/cvl_dcf_switch_filter_test_plan.rst b/test_plans/cvl_dcf_switch_filter_test_plan.rst
index 116b2cc..76857e4 100644
--- a/test_plans/cvl_dcf_switch_filter_test_plan.rst
+++ b/test_plans/cvl_dcf_switch_filter_test_plan.rst
@@ -231,7 +231,7 @@ Prerequisites
9. Launch dpdk on VF0 and VF1, and VF0 request DCF mode::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:01.0,cap=dcf -w 0000:18:01.1 -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:01.0,cap=dcf -a 0000:18:01.1 -- -i
testpmd> set portlist 1
testpmd> set fwd rxonly
testpmd> set verbose 1
@@ -2392,7 +2392,7 @@ Subcase 1: add existing rules but with different vfs
1. Launch dpdk on VF0, VF1 and VF2, and VF0 request DCF mode::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:01.0,cap=dcf -w 0000:18:01.1 -w 0000:18:01.2 -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:01.0,cap=dcf -a 0000:18:01.1 -a 0000:18:01.2 -- -i
testpmd> set portlist 1,2
testpmd> set fwd rxonly
testpmd> set verbose 1
@@ -2454,7 +2454,7 @@ Subcase 3: add two rules with one rule's input set included in the other
1. Launch dpdk on VF0, VF1 and VF2, and VF0 request DCF mode::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:01.0,cap=dcf -w 0000:18:01.1 -w 0000:18:01.2 -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:01.0,cap=dcf -a 0000:18:01.1 -a 0000:18:01.2 -- -i
testpmd> set portlist 1,2
testpmd> set fwd rxonly
testpmd> set verbose 1
@@ -2617,7 +2617,7 @@ are dropped.
1. Launch dpdk on VF0, VF1 and VF2, and VF0 request DCF mode::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:01.0,cap=dcf -w 0000:18:01.1 -w 0000:18:01.2 -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:01.0,cap=dcf -a 0000:18:01.1 -a 0000:18:01.2 -- -i
testpmd> set portlist 1,2
testpmd> set fwd mac
testpmd> set verbose 1
@@ -2688,7 +2688,7 @@ This case is designed based on 4*25G NIC.
6. launch dpdk on VF0, and request DCF mode::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:01.0,cap=dcf -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:01.0,cap=dcf -- -i
7. set a switch rule to each VF from DCF, totally 63 rules::
diff --git a/test_plans/cvl_fdir_test_plan.rst b/test_plans/cvl_fdir_test_plan.rst
index 2458601..64d06f1 100644
--- a/test_plans/cvl_fdir_test_plan.rst
+++ b/test_plans/cvl_fdir_test_plan.rst
@@ -145,7 +145,7 @@ Prerequisites
5. Launch the app ``testpmd`` with the following arguments::
- ./testpmd -c 0xff -n 6 -w 86:00.0 --log-level="ice,7" -- -i --portmask=0xff --rxq=64 --txq=64 --port-topology=loop
+ ./testpmd -c 0xff -n 6 -a 86:00.0 --log-level="ice,7" -- -i --portmask=0xff --rxq=64 --txq=64 --port-topology=loop
testpmd> set fwd rxonly
testpmd> set verbose 1
@@ -156,7 +156,7 @@ Prerequisites
Notes: if need two ports environment, launch ``testpmd`` with the following arguments::
- ./testpmd -c 0xff -n 6 -w 86:00.0 -w 86:00.1 --log-level="ice,7" -- -i --portmask=0xff --rxq=64 --txq=64 --port-topology=loop
+ ./testpmd -c 0xff -n 6 -a 86:00.0 -a 86:00.1 --log-level="ice,7" -- -i --portmask=0xff --rxq=64 --txq=64 --port-topology=loop
Default parameters
------------------
diff --git a/test_plans/cvl_limit_value_test_test_plan.rst b/test_plans/cvl_limit_value_test_test_plan.rst
index 9fec36f..160b126 100644
--- a/test_plans/cvl_limit_value_test_test_plan.rst
+++ b/test_plans/cvl_limit_value_test_test_plan.rst
@@ -91,7 +91,7 @@ Prerequisites
5. Launch the app ``testpmd`` with the following arguments::
- ./testpmd -c 0xff -n 6 -w 86:01.0 -w 86:01.1 --file-prefix=vf -- -i --rxq=16 --txq=16
+ ./testpmd -c 0xff -n 6 -a 86:01.0 -a 86:01.1 --file-prefix=vf -- -i --rxq=16 --txq=16
testpmd> set fwd rxonly
testpmd> set verbose 1
@@ -159,7 +159,7 @@ if 2 vfs generated by 2 pf port, each vf can create 14336 rules at most.
1. start testpmd on vf00::
- ./testpmd -c 0xf -n 6 -w 86:01.0 --file-prefix=vf00 -- -i --rxq=4 --txq=4
+ ./testpmd -c 0xf -n 6 -a 86:01.0 --file-prefix=vf00 -- -i --rxq=4 --txq=4
create 1 rule on vf00::
@@ -169,7 +169,7 @@ if 2 vfs generated by 2 pf port, each vf can create 14336 rules at most.
2. start testpmd on vf10::
- ./testpmd -c 0xf0 -n 6 -w 86:0a.0 --file-prefix=vf10 -- -i --rxq=4 --txq=4
+ ./testpmd -c 0xf0 -n 6 -a 86:0a.0 --file-prefix=vf10 -- -i --rxq=4 --txq=4
create 14336 rules on vf10::
@@ -218,7 +218,7 @@ this card can create (2048 + 14336)*2=32768 rules.
2. start testpmd on vf00::
- ./testpmd -c 0xf -n 6 -w 86:01.0 --file-prefix=vf00 -- -i --rxq=4 --txq=4
+ ./testpmd -c 0xf -n 6 -a 86:01.0 --file-prefix=vf00 -- -i --rxq=4 --txq=4
create 1 rule on vf00::
@@ -228,7 +228,7 @@ this card can create (2048 + 14336)*2=32768 rules.
2. start testpmd on vf10::
- ./testpmd -c 0xf0 -n 6 -w 86:0a.0 --file-prefix=vf10 -- -i --rxq=4 --txq=4
+ ./testpmd -c 0xf0 -n 6 -a 86:0a.0 --file-prefix=vf10 -- -i --rxq=4 --txq=4
create 14335 rules on vf10::
@@ -289,7 +289,7 @@ so if create 16384 rules on pf1,check failed to create rule on vf00 and vf10(vf0
3. start testpmd on vf00 and vf10::
- ./testpmd -c 0xf -n 6 -w 86:01.0 -w 86:11.0 --file-prefix=vf00 -- -i --rxq=4 --txq=4
+ ./testpmd -c 0xf -n 6 -a 86:01.0 -a 86:11.0 --file-prefix=vf00 -- -i --rxq=4 --txq=4
create 1 rule on vf00::
@@ -435,7 +435,7 @@ Prerequisites
5. Launch the app ``testpmd`` with the following arguments::
- ./testpmd -c 0xff -n 6 -w 86:00.0 --log-level="ice,7" -- -i --portmask=0xff --rxq=64 --txq=64 --port-topology=loop
+ ./testpmd -c 0xff -n 6 -a 86:00.0 --log-level="ice,7" -- -i --portmask=0xff --rxq=64 --txq=64 --port-topology=loop
testpmd> set fwd rxonly
testpmd> set verbose 1
@@ -446,7 +446,7 @@ Prerequisites
Notes: if need two ports environment, launch ``testpmd`` with the following arguments::
- ./testpmd -c 0xff -n 6 -w 86:00.0 -w 86:00.1 --log-level="ice,7" -- -i --portmask=0xff --rxq=64 --txq=64 --port-topology=loop
+ ./testpmd -c 0xff -n 6 -a 86:00.0 -a 86:00.1 --log-level="ice,7" -- -i --portmask=0xff --rxq=64 --txq=64 --port-topology=loop
Test case: add/delete rules
============================
@@ -529,7 +529,7 @@ Prerequisites
8. Launch dpdk on VF0 and VF1, and VF0 request DCF mode::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:01.0,cap=dcf -w 0000:18:01.1 -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:01.0,cap=dcf -a 0000:18:01.1 -- -i
testpmd> set portlist 1
testpmd> set fwd rxonly
testpmd> set verbose 1
diff --git a/test_plans/cvl_switch_filter_pppoe_test_plan.rst b/test_plans/cvl_switch_filter_pppoe_test_plan.rst
index f63965b..897e8c6 100644
--- a/test_plans/cvl_switch_filter_pppoe_test_plan.rst
+++ b/test_plans/cvl_switch_filter_pppoe_test_plan.rst
@@ -203,7 +203,7 @@ Prerequisites
6. Launch dpdk with the following arguments in non-pipeline mode::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:00.0 --log-level="ice,8" -- -i --txq=16 --rxq=16 --cmdline-file=testpmd_fdir_rules
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:00.0 --log-level="ice,8" -- -i --txq=16 --rxq=16 --cmdline-file=testpmd_fdir_rules
testpmd> port config 0 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd
testpmd> set fwd rxonly
testpmd> set verbose 1
@@ -217,7 +217,7 @@ Prerequisites
Launch dpdk in pipeline mode with the following testpmd command line::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:00.0,pipeline-mode-support=1 --log-level="ice,8" -- -i --txq=16 --rxq=16
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:00.0,pipeline-mode-support=1 --log-level="ice,8" -- -i --txq=16 --rxq=16
Test case: Ethertype filter
===========================
diff --git a/test_plans/cvl_switch_filter_test_plan.rst b/test_plans/cvl_switch_filter_test_plan.rst
index 992aa6c..ae29e64 100644
--- a/test_plans/cvl_switch_filter_test_plan.rst
+++ b/test_plans/cvl_switch_filter_test_plan.rst
@@ -181,7 +181,7 @@ Prerequisites
6. Launch dpdk with the following arguments in non-pipeline mode::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:00.0 --log-level="ice,8" -- -i --txq=16 --rxq=16 --cmdline-file=testpmd_fdir_rules
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:00.0 --log-level="ice,8" -- -i --txq=16 --rxq=16 --cmdline-file=testpmd_fdir_rules
testpmd> port config 0 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd
testpmd> set fwd rxonly
testpmd> set verbose 1
@@ -195,7 +195,7 @@ Prerequisites
Launch dpdk in pipeline mode with the following testpmd command line::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:18:00.0,pipeline-mode-support=1 --log-level="ice,8" -- -i --txq=16 --rxq=16
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 0000:18:00.0,pipeline-mode-support=1 --log-level="ice,8" -- -i --txq=16 --rxq=16
Test case: VXLAN non-pipeline mode
==================================
diff --git a/test_plans/ddp_l2tpv3_test_plan.rst b/test_plans/ddp_l2tpv3_test_plan.rst
index 6d3952f..8262da3 100644
--- a/test_plans/ddp_l2tpv3_test_plan.rst
+++ b/test_plans/ddp_l2tpv3_test_plan.rst
@@ -100,7 +100,7 @@ any DDP functionality*
5. Start the TESTPMD::
- ./x86_64-native-linuxapp-gcc/build/app/test-pmd/testpmd -c f -n 4 -w
+ ./x86_64-native-linuxapp-gcc/build/app/test-pmd/testpmd -c f -n 4 -a
<PCI address of device> -- -i --port-topology=chained --txq=64 --rxq=64
--pkt-filter-mode=perfect
diff --git a/test_plans/dpdk_hugetlbfs_mount_size_test_plan.rst b/test_plans/dpdk_hugetlbfs_mount_size_test_plan.rst
index 6d0eb88..218b960 100644
--- a/test_plans/dpdk_hugetlbfs_mount_size_test_plan.rst
+++ b/test_plans/dpdk_hugetlbfs_mount_size_test_plan.rst
@@ -51,14 +51,14 @@ Test Case 1: default hugepage size w/ and w/o numa
2. Bind one nic port to igb_uio driver, launch testpmd::
- ./testpmd -c 0x3 -n 4 --huge-dir /mnt/huge --file-prefix=abc -- -i
+ ./dpdk-testpmd -c 0x3 -n 4 --huge-dir /mnt/huge --file-prefix=abc -- -i
testpmd>start
3. Send packet with packet generator, check testpmd could forward packets correctly.
4. Goto step 2 resart testpmd with numa support::
- ./testpmd -c 0x3 -n 4 --huge-dir /mnt/huge --file-prefix=abc -- -i --numa
+ ./dpdk-testpmd -c 0x3 -n 4 --huge-dir /mnt/huge --file-prefix=abc -- -i --numa
testpmd>start
5. Send packets with packet generator, make sure testpmd could receive and fwd packets correctly.
@@ -73,10 +73,10 @@ Test Case 2: mount size exactly match total hugepage size with two mount points
2. Bind two nic ports to igb_uio driver, launch testpmd with numactl::
- numactl --membind=1 ./testpmd -l 31-32 -n 4 --legacy-mem --socket-mem 0,2048 --huge-dir /mnt/huge1 --file-prefix=abc -w 82:00.0 -- -i --socket-num=1 --no-numa
+ numactl --membind=1 ./dpdk-testpmd -l 31-32 -n 4 --legacy-mem --socket-mem 0,2048 --huge-dir /mnt/huge1 --file-prefix=abc -a 82:00.0 -- -i --socket-num=1 --no-numa
testpmd>start
- numactl --membind=1 ./testpmd -l 33-34 -n 4 --legacy-mem --socket-mem 0,2048 --huge-dir /mnt/huge2 --file-prefix=bcd -w 82:00.1 -- -i --socket-num=1 --no-numa
+ numactl --membind=1 ./dpdk-testpmd -l 33-34 -n 4 --legacy-mem --socket-mem 0,2048 --huge-dir /mnt/huge2 --file-prefix=bcd -a 82:00.1 -- -i --socket-num=1 --no-numa
testpmd>start
3. Send packets with packet generator, make sure two testpmd could receive and fwd packets correctly.
@@ -90,7 +90,7 @@ Test Case 3: mount size greater than total hugepage size with single mount point
2. Bind one nic port to igb_uio driver, launch testpmd::
- ./testpmd -c 0x3 -n 4 --legacy-mem --huge-dir /mnt/huge --file-prefix=abc -- -i
+ ./dpdk-testpmd -c 0x3 -n 4 --legacy-mem --huge-dir /mnt/huge --file-prefix=abc -- -i
testpmd>start
3. Send packets with packet generator, make sure testpmd could receive and fwd packets correctly.
@@ -106,13 +106,13 @@ Test Case 4: mount size greater than total hugepage size with multiple mount poi
2. Bind one nic port to igb_uio driver, launch testpmd::
- numactl --membind=0 ./testpmd -c 0x3 -n 4 --legacy-mem --socket-mem 2048,0 --huge-dir /mnt/huge1 --file-prefix=abc -- -i --socket-num=0 --no-numa
+ numactl --membind=0 ./dpdk-testpmd -c 0x3 -n 4 --legacy-mem --socket-mem 2048,0 --huge-dir /mnt/huge1 --file-prefix=abc -- -i --socket-num=0 --no-numa
testpmd>start
- numactl --membind=0 ./testpmd -c 0xc -n 4 --legacy-mem --socket-mem 2048,0 --huge-dir /mnt/huge2 --file-prefix=bcd -- -i --socket-num=0 --no-numa
+ numactl --membind=0 ./dpdk-testpmd -c 0xc -n 4 --legacy-mem --socket-mem 2048,0 --huge-dir /mnt/huge2 --file-prefix=bcd -- -i --socket-num=0 --no-numa
testpmd>start
- numactl --membind=0 ./testpmd -c 0x30 -n 4 --legacy-mem --socket-mem 1024,0 --huge-dir /mnt/huge3 --file-prefix=fgh -- -i --socket-num=0 --no-numa
+ numactl --membind=0 ./dpdk-testpmd -c 0x30 -n 4 --legacy-mem --socket-mem 1024,0 --huge-dir /mnt/huge3 --file-prefix=fgh -- -i --socket-num=0 --no-numa
testpmd>start
3. Send packets with packet generator, check first and second testpmd will start correctly while third one will report error with not enough mem in socket 0.
@@ -124,6 +124,6 @@ Test Case 5: run dpdk app in limited hugepages controlled by cgroup
cgcreate -g hugetlb:/test-subgroup
cgset -r hugetlb.1GB.limit_in_bytes=2147483648 test-subgroup
- cgexec -g hugetlb:test-subgroup numactl -m 1 ./testpmd -c 0x3000 -n 4 -- -i --socket-num=1 --no-numa
+ cgexec -g hugetlb:test-subgroup numactl -m 1 ./dpdk-testpmd -c 0x3000 -n 4 -- -i --socket-num=1 --no-numa
2. Start testpmd and send packets with packet generator, make sure testpmd could receive and fwd packets correctly.
diff --git a/test_plans/enable_package_download_in_ice_driver_test_plan.rst b/test_plans/enable_package_download_in_ice_driver_test_plan.rst
index 4139191..578ba30 100644
--- a/test_plans/enable_package_download_in_ice_driver_test_plan.rst
+++ b/test_plans/enable_package_download_in_ice_driver_test_plan.rst
@@ -104,7 +104,7 @@ Test case 2: Driver enters Safe Mode successfully
2. Start testpmd::
./testpmd -c 0x3fe -n 6 \
- -w PORT0_PCI,safe-mode-support=1 -w PORT1_PCI,safe-mode-support=1 \
+ -a PORT0_PCI,safe-mode-support=1 -a PORT1_PCI,safe-mode-support=1 \
-- -i --nb-cores=8 --rxq=8 --txq=8 --port-topology=chained
There will be an error reported::
@@ -176,7 +176,7 @@ Compile DPDK and testpmd::
Launch testpmd with 1 default interface and 1 specific interface::
- ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -w 18:00.0 -w b1:00.0 --log-level=8 -- -i
+ ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -a 18:00.0 -a b1:00.0 --log-level=8 -- -i
In this case, b1:00.0 interface is specific interface.
diff --git a/test_plans/eventdev_perf_test_plan.rst b/test_plans/eventdev_perf_test_plan.rst
index d5fe4ed..f8e8153 100644
--- a/test_plans/eventdev_perf_test_plan.rst
+++ b/test_plans/eventdev_perf_test_plan.rst
@@ -49,14 +49,14 @@ Description: Execute performance test with Atomic_atq type of stage in multi-flo
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=A --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=A --wlcores=23
Parameters::
-l CORELIST : List of cores to run on
The argument format is <c1>[-c2][,c3[-c4],...]
where c1, c2, etc are core indexes between 0 and 24
- -w --pci-allowlist : Add a PCI device in allow list.
+ -a --pci-allowlist : Add a PCI device in allow list.
Only use the specified PCI devices. The argument format
is <[domain:]bus:devid.func>. This option can be present
several times (once per device).
@@ -76,7 +76,7 @@ Description: Execute performance test with Parallel_atq type of stage in multi-f
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=P --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=P --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -88,7 +88,7 @@ Description: Execute performance test with Ordered_atq type of stage in multi-fl
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=O --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=O --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -100,7 +100,7 @@ Description: Execute performance test with Atomic_queue type of stage in multi-f
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=A --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=A --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -112,7 +112,7 @@ Description: Execute performance test with Parallel_queue type of stage in multi
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=P --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=P --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -124,7 +124,7 @@ Description: Execute performance test with Ordered_queue type of stage in multi-
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=O --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=O --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -136,7 +136,7 @@ Description: Execute performance test with Atomic_atq type of stage in multi-flo
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=A --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=A --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -148,7 +148,7 @@ Description: Execute performance test with Parallel_atq type of stage in multi-f
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=P --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=P --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -160,7 +160,7 @@ Description: Execute performance test with Ordered_atq type of stage in multi-fl
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=O --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=O --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -172,7 +172,7 @@ Description: Execute performance test with Atomic_queue type of stage in multi-f
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=A --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=A --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -184,7 +184,7 @@ Description: Execute performance test with Parallel_queue type of stage in multi
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=P --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=P --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -196,7 +196,7 @@ Description: Execute performance test with Ordered_queue type of stage in multi-
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=O --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=O --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -209,7 +209,7 @@ Description: Execute performance test with Atomic_atq type of stage in multi-flo
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -w device2_bus_id -w device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=A --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -a device2_bus_id -a device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=A --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -221,7 +221,7 @@ Description: Execute performance test with Parallel_atq type of stage in multi-f
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -w device2_bus_id -w device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=P --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -a device2_bus_id -a device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=P --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -233,7 +233,7 @@ Description: Execute performance test with Ordered_atq type of stage in multi-fl
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -w device2_bus_id -w device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=O --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -a device2_bus_id -a device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_atq --stlist=O --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -245,7 +245,7 @@ Description: Execute performance test with Atomic_queue type of stage in multi-f
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -w device2_bus_id -w device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=A --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -a device2_bus_id -a device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=A --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -257,7 +257,7 @@ Description: Execute performance test with Parallel_queue type of stage in multi
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -w device2_bus_id -w device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=P --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -a device2_bus_id -a device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=P --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
@@ -269,7 +269,7 @@ Description: Execute performance test with Ordered_queue type of stage in multi-
1. Run the sample with below command::
- # ./build/dpdk-test-eventdev -l 22-23 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -w device2_bus_id -w device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=O --wlcores=23
+ # ./build/dpdk-test-eventdev -l 22-23 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -a device2_bus_id -a device3_bus_id -- --prod_type_ethdev --nb_pkts=0 --verbose 2 --test=pipeline_queue --stlist=O --wlcores=23
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
diff --git a/test_plans/eventdev_pipeline_perf_test_plan.rst b/test_plans/eventdev_pipeline_perf_test_plan.rst
index abeab18..34464ab 100644
--- a/test_plans/eventdev_pipeline_perf_test_plan.rst
+++ b/test_plans/eventdev_pipeline_perf_test_plan.rst
@@ -51,12 +51,12 @@ Description: Execute performance test with Atomic_atq type of stage in multi-flo
1. Run the sample with below command::
- # ./build/dpdk-eventdev_pipeline -c 0xe00000 -w eventdev_device_bus_id -w device_bus_id -- -w 0xc00000 -n=0 --dump
+ # ./build/dpdk-eventdev_pipeline -c 0xe00000 -a eventdev_device_bus_id -a device_bus_id -- -w 0xc00000 -n=0 --dump
Parameters::
-c, COREMASK : Hexadecimal bitmask of cores to run on
- -w, --pci-allowlist : Add a PCI device in allow list.
+ -a, --pci-allowlist : Add a PCI device in allow list.
Only use the specified PCI devices. The argument format
is <[domain:]bus:devid.func>. This option can be present
several times (once per device).
@@ -75,12 +75,12 @@ Description: Execute performance test with Parallel_atq type of stage in multi-f
1. Run the sample with below command::
- # ./build/dpdk-eventdev_pipeline -c 0xe00000 -w eventdev_device_bus_id -w device_bus_id -- -w 0xc00000 -n=0 -p --dump
+ # ./build/dpdk-eventdev_pipeline -c 0xe00000 -a eventdev_device_bus_id -a device_bus_id -- -w 0xc00000 -n=0 -p --dump
Parameters::
-c, COREMASK : Hexadecimal bitmask of cores to run on
- -w, --pci-allowlist : Add a PCI device in allow list.
+ -a, --pci-allowlist : Add a PCI device in allow list.
Only use the specified PCI devices. The argument format
is <[domain:]bus:devid.func>. This option can be present
several times (once per device).
@@ -100,12 +100,12 @@ Description: Execute performance test with Ordered_atq type of stage in multi-fl
1. Run the sample with below command::
- # ./build/dpdk-eventdev_pipeline -c 0xe00000 -w eventdev_device_bus_id -w device_bus_id -- -w 0xc00000 -n=0 -o --dump
+ # ./build/dpdk-eventdev_pipeline -c 0xe00000 -a eventdev_device_bus_id -a device_bus_id -- -w 0xc00000 -n=0 -o --dump
Parameters::
-c, COREMASK : Hexadecimal bitmask of cores to run on
- -w, --pci-allowlist : Add a PCI device in allow list.
+ -a, --pci-allowlist : Add a PCI device in allow list.
Only use the specified PCI devices. The argument format
is <[domain:]bus:devid.func>. This option can be present
several times (once per device).
@@ -125,12 +125,12 @@ Description: Execute performance test with Atomic_atq type of stage in multi-flo
1. Run the sample with below command::
- # ./build/dpdk-eventdev_pipeline -c 0xe00000 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -- -w 0xc00000 -n=0 --dump
+ # ./build/dpdk-eventdev_pipeline -c 0xe00000 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -- -w 0xc00000 -n=0 --dump
Parameters::
-c, COREMASK : Hexadecimal bitmask of cores to run on
- -w, --pci-allowlist : Add a PCI device in allow list.
+ -a, --pci-allowlist : Add a PCI device in allow list.
Only use the specified PCI devices. The argument format
is <[domain:]bus:devid.func>. This option can be present
several times (once per device).
@@ -149,12 +149,12 @@ Description: Execute performance test with Parallel_atq type of stage in multi-f
1. Run the sample with below command::
- # ./build/dpdk-eventdev_pipeline -c 0xe00000 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -- -w 0xc00000 -n=0 -p --dump
+ # ./build/dpdk-eventdev_pipeline -c 0xe00000 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -- -w 0xc00000 -n=0 -p --dump
Parameters::
-c, COREMASK : Hexadecimal bitmask of cores to run on
- -w, --pci-allowlist : Add a PCI device in allow list.
+ -a, --pci-allowlist : Add a PCI device in allow list.
Only use the specified PCI devices. The argument format
is <[domain:]bus:devid.func>. This option can be present
several times (once per device).
@@ -174,12 +174,12 @@ Description: Execute performance test with Ordered_atq type of stage in multi-fl
1. Run the sample with below command::
- # ./build/dpdk-eventdev_pipeline -c 0xe00000 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -- -w 0xc00000 -n=0 -o --dump
+ # ./build/dpdk-eventdev_pipeline -c 0xe00000 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -- -w 0xc00000 -n=0 -o --dump
Parameters::
-c, COREMASK : Hexadecimal bitmask of cores to run on
- -w, --pci-allowlist : Add a PCI device in allow list.
+ -a, --pci-allowlist : Add a PCI device in allow list.
Only use the specified PCI devices. The argument format
is <[domain:]bus:devid.func>. This option can be present
several times (once per device).
@@ -199,12 +199,12 @@ Description: Execute performance test with Atomic_atq type of stage in multi-flo
1. Run the sample with below command::
- # ./build/dpdk-eventdev_pipeline -c 0xe00000 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -w device2_bus_id -w device3_bus_id -- -w 0xc00000 -n=0 --dump
+ # ./build/dpdk-eventdev_pipeline -c 0xe00000 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -a device2_bus_id -a device3_bus_id -- -w 0xc00000 -n=0 --dump
Parameters::
-c, COREMASK : Hexadecimal bitmask of cores to run on
- -w, --pci-allowlist : Add a PCI device in allow list.
+ -a, --pci-allowlist : Add a PCI device in allow list.
Only use the specified PCI devices. The argument format
is <[domain:]bus:devid.func>. This option can be present
several times (once per device).
@@ -223,12 +223,12 @@ Description: Execute performance test with Parallel_atq type of stage in multi-f
1. Run the sample with below command::
- # ./build/dpdk-eventdev_pipeline -c 0xe00000 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -w device2_bus_id -w device3_bus_id -- -w 0xc00000 -n=0 -p --dump
+ # ./build/dpdk-eventdev_pipeline -c 0xe00000 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -a device2_bus_id -a device3_bus_id -- -w 0xc00000 -n=0 -p --dump
Parameters::
-c, COREMASK : Hexadecimal bitmask of cores to run on
- -w, --pci-allowlist : Add a PCI device in allow list.
+ -a, --pci-allowlist : Add a PCI device in allow list.
Only use the specified PCI devices. The argument format
is <[domain:]bus:devid.func>. This option can be present
several times (once per device).
@@ -248,12 +248,12 @@ Description: Execute performance test with Ordered_atq type of stage in multi-fl
1. Run the sample with below command::
- # ./build/dpdk-eventdev_pipeline -c 0xe00000 -w eventdev_device_bus_id -w device0_bus_id -w device1_bus_id -w device2_bus_id -w device3_bus_id -- -w 0xc00000 -n=0 -o --dump
+ # ./build/dpdk-eventdev_pipeline -c 0xe00000 -a eventdev_device_bus_id -a device0_bus_id -a device1_bus_id -a device2_bus_id -a device3_bus_id -- -w 0xc00000 -n=0 -o --dump
Parameters::
-c, COREMASK : Hexadecimal bitmask of cores to run on
- -w, --pci-allowlist : Add a PCI device in allow list.
+ -a, --pci-allowlist : Add a PCI device in allow list.
Only use the specified PCI devices. The argument format
is <[domain:]bus:devid.func>. This option can be present
several times (once per device).
@@ -265,4 +265,4 @@ Description: Execute performance test with Ordered_atq type of stage in multi-fl
2. Use Ixia to send huge number of packets(with same 5-tuple and different 5-tuple)
-3. Observe the speed of packets received(Rx-rate) on Ixia.
\ No newline at end of file
+3. Observe the speed of packets received(Rx-rate) on Ixia.
diff --git a/test_plans/flexible_rxd_test_plan.rst b/test_plans/flexible_rxd_test_plan.rst
index b2ca34a..30ae699 100644
--- a/test_plans/flexible_rxd_test_plan.rst
+++ b/test_plans/flexible_rxd_test_plan.rst
@@ -94,13 +94,13 @@ Test Case 01: Check single VLAN fields in RXD (802.1Q)
Launch testpmd by::
- ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -w 18:00.0,proto_xtr=vlan -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -a 18:00.0,proto_xtr=vlan -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
testpmd>set verbose 1
testpmd>set fwd io
testpmd>start
-Please change the core setting (-l option) and port's PCI (-w option) \
+Please change the core setting (-l option) and port's PCI (-a option) \
by your DUT environment
Send a packet with VLAN tag from test network interface::
@@ -130,7 +130,7 @@ Test steps are same to ``Test Case 01``, just change the launch command of testp
Launch testpmd command::
- ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -w 18:00.0,proto_xtr=vlan -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -a 18:00.0,proto_xtr=vlan -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
Test packet::
@@ -148,7 +148,7 @@ Test steps are same to ``Test Case 01``, just change the launch command of testp
Launch testpmd command::
- ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -w 18:00.0,proto_xtr=vlan -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -a 18:00.0,proto_xtr=vlan -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
Test packet::
@@ -167,7 +167,7 @@ Test steps are same to ``Test Case 01``, just change the launch command of testp
Launch testpmd command::
- ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -w 18:00.0,proto_xtr=vlan -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -a 18:00.0,proto_xtr=vlan -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
Test packet::
@@ -186,7 +186,7 @@ Test steps are same to ``Test Case 01``, just change the launch command of testp
Launch testpmd command::
- ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -w 18:00.0,proto_xtr=ipv4 -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -a 18:00.0,proto_xtr=ipv4 -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
Test packet::
@@ -208,7 +208,7 @@ Test steps are same to ``Test Case 01``, just change the launch command of testp
Launch testpmd command::
- ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -w 18:00.0,proto_xtr=ipv6 -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -a 18:00.0,proto_xtr=ipv6 -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
Test packet::
@@ -230,7 +230,7 @@ Test steps are same to ``Test Case 01``, just change the launch command of testp
Launch testpmd command::
- ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -w 18:00.0,proto_xtr=ipv6_flow -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -a 18:00.0,proto_xtr=ipv6_flow -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
Test packet::
@@ -250,7 +250,7 @@ Test steps are same to ``Test Case 01``, just change the launch command of testp
Launch testpmd command::
- ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -w 18:00.0,proto_xtr=tcp -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -a 18:00.0,proto_xtr=tcp -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
Test packet::
@@ -269,7 +269,7 @@ Test steps are same to ``Test Case 01``, just change the launch command of testp
Launch testpmd command::
- ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -w 18:00.0,proto_xtr=tcp -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -a 18:00.0,proto_xtr=tcp -- -i --rxq=32 --txq=32 --portmask=0x1 --nb-cores=2
Test packet::
@@ -288,7 +288,7 @@ Test steps are same to ``Test Case 01``, just change the launch command of testp
Launch testpmd command::
- ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -w 18:00.0,proto_xtr='[(2):ipv4,(3):ipv6,(4):tcp]' -- -i --rxq=64 --txq=64 --portmask=0x1
+ ./x86_64-native-linux-gcc/app/testpmd -l 6-9 -n 4 -a 18:00.0,proto_xtr='[(2):ipv4,(3):ipv6,(4):tcp]' -- -i --rxq=64 --txq=64 --portmask=0x1
Create generic flow on NIC::
@@ -360,7 +360,7 @@ Test steps are same to ``Test Case 01``, just change the launch command of testp
MPLS cases use same parameter Launch testpmd::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -w af:01.0,proto_xtr=ip_offset -- -i --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -a af:01.0,proto_xtr=ip_offset -- -i --portmask=0x1 --nb-cores=2
check RXDID value correct::
diff --git a/test_plans/floating_veb_test_plan.rst b/test_plans/floating_veb_test_plan.rst
index 17b8231..1522b53 100644
--- a/test_plans/floating_veb_test_plan.rst
+++ b/test_plans/floating_veb_test_plan.rst
@@ -124,14 +124,14 @@ MAC switch when PF is link down as well as up.
1. Launch PF testpmd::
./testpmd -c 0xf -n 4 --socket-mem 1024,1024
- -w 05:00.0,enable_floating_veb=1 --file-prefix=test1 -- -i
+ -a 05:00.0,enable_floating_veb=1 --file-prefix=test1 -- -i
testpmd> port start all
testpmd> show port info all
2. VF1, run testpmd::
./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024
- -w 05:02.0 --file-prefix=test2 -- -i --crc-strip
+ -a 05:02.0 --file-prefix=test2 -- -i --crc-strip
testpmd> mac_addr add 0 vf1_mac_address
testpmd> set fwd rxonly
testpmd> set promisc all off
@@ -140,7 +140,7 @@ MAC switch when PF is link down as well as up.
VF2, run testpmd::
- ./testpmd -c 0xf00 -n 4 --socket-mem 1024,1024 -w 05:02.1 --file-prefix=test3
+ ./testpmd -c 0xf00 -n 4 --socket-mem 1024,1024 -a 05:02.1 --file-prefix=test3
-- -i --crc-strip --eth-peer=0,vf1_mac_address
testpmd> set fwd txonly
testpmd> start
@@ -162,7 +162,7 @@ send traffic from VF0 to PF, PF can't receive any packets either.
1. In PF, launch testpmd::
- ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -w 05:00.0,enable_floating_veb=1 --file-prefix=test1 -- -i
+ ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -a 05:00.0,enable_floating_veb=1 --file-prefix=test1 -- -i
testpmd> set fwd rxonly
testpmd> set promisc all off
testpmd> port start all
@@ -171,7 +171,7 @@ send traffic from VF0 to PF, PF can't receive any packets either.
2. VF1, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 05:02.0 --file-prefix=test2 -- -i --eth-peer=0,pf_mac_addr
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 05:02.0 --file-prefix=test2 -- -i --eth-peer=0,pf_mac_addr
testpmd> set fwd txonly
testpmd> start
testpmd> show port stats all
@@ -193,7 +193,7 @@ in floating mode, check VF1 can't receive traffic from tester.
2. PF, launch testpmd::
- ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -w 05:00.0,enable_floating_veb=1 --file-prefix=test1 -- -i --eth-peer=0,VF_mac_address
+ ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -a 05:00.0,enable_floating_veb=1 --file-prefix=test1 -- -i --eth-peer=0,VF_mac_address
testpmd> set fwd mac
testpmd> port start all
testpmd> start
@@ -201,7 +201,7 @@ in floating mode, check VF1 can't receive traffic from tester.
VF1, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 05:02.0 --file-prefix=test2 -- -i
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 05:02.0 --file-prefix=test2 -- -i
testpmd> set fwd rxonly
testpmd> start
testpmd> show port stats all
@@ -237,7 +237,7 @@ Details:
1. Launch PF testpmd, run testpmd with floating parameters and make the link down::
./testpmd -c 0xf -n 4 --socket-mem 1024,1024 \
- \"-w "05:00.0,enable_floating_veb=1,floating_veb_list=0;2-3\" \
+ \"-a "05:00.0,enable_floating_veb=1,floating_veb_list=0;2-3\" \
--file-prefix=test1 -- -i
//VF0, VF2 and VF3in floating VEB, VF1 in legacy VEB
@@ -251,7 +251,7 @@ Details:
VF0, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 05:02.0 \
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 05:02.0 \
--file-prefix=test2 -- -i --eth-peer=0,vf1_mac_address
testpmd> set fwd rxonly
testpmd> mac_addr add 0 vf0_mac_address //set the vf0_mac_address
@@ -260,7 +260,7 @@ Details:
VF1, run testpmd::
- ./testpmd -c 0xf00 -n 4 --socket-mem 1024,1024 -w 05:02.1 \
+ ./testpmd -c 0xf00 -n 4 --socket-mem 1024,1024 -a 05:02.1 \
--file-prefix=test3 -- -i --eth-peer=0,vf1_mac_address
testpmd> set fwd txonly
testpmd> mac_addr add 0 vf1_mac_addres
@@ -275,7 +275,7 @@ Details:
VF2, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 05:02.2 \
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 05:02.2 \
--file-prefix=test2 -- -i
testpmd> set fwd rxonly
testpmd> mac_addr add 0 vf2_mac_addres
@@ -284,7 +284,7 @@ Details:
VF0, run testpmd::
- ./testpmd -c 0xf00 -n 4 --socket-mem 1024,1024 -w 05:02.0 \
+ ./testpmd -c 0xf00 -n 4 --socket-mem 1024,1024 -a 05:02.0 \
--file-prefix=test3 -- -i --eth-peer=0,vf2_mac_address
testpmd> set fwd txonly
testpmd> start
@@ -319,7 +319,7 @@ Details:
1. In PF, launch testpmd::
./testpmd -c 0xf -n 4 --socket-mem 1024,1024 \
- \"-w 05:00.0,enable_floating_veb=1,floating_veb_list=0;3\" \
+ \"-a 05:00.0,enable_floating_veb=1,floating_veb_list=0;3\" \
--file-prefix=test1 -- -i
testpmd> set fwd rxonly
testpmd> port start all
@@ -328,7 +328,7 @@ Details:
2. VF0, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 05:02.0 \
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 05:02.0 \
--file-prefix=test2 -- -i --eth-peer=0,pf_mac_addr
testpmd> set fwd txonly
testpmd> start
@@ -337,7 +337,7 @@ Details:
3. VF1, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 05:02.1 \
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 05:02.1 \
--file-prefix=test2 -- -i --eth-peer=0,pf_mac_addr
testpmd> set fwd txonly
testpmd> start
@@ -346,7 +346,7 @@ Details:
4. VF0, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 05:02.0 --file-prefix=test2 -- -i
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 05:02.0 --file-prefix=test2 -- -i
testpmd> mac_addr add 0 VF0_mac_address
testpmd> set promisc all off
testpmd> set fwd rxonly
@@ -361,7 +361,7 @@ Details:
5. VF1, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 05:02.1 --file-prefix=test2 -- -i
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 05:02.1 --file-prefix=test2 -- -i
testpmd> mac_addr add 0 VF1_mac_address
testpmd> set promisc all off
testpmd> set fwd rxonly
@@ -376,7 +376,7 @@ Details:
6. VF1, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 05:02.1 --file-prefix=test2 -- -i
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 05:02.1 --file-prefix=test2 -- -i
testpmd> mac_addr add 0 VF1_mac_address
testpmd> set promisc all off
testpmd> set fwd rxonly
@@ -384,7 +384,7 @@ Details:
VF2, run testpmd::
- ./testpmd -c 0xf00 -n 4 --socket-mem 1024,1024 -w 05:02.2 \
+ ./testpmd -c 0xf00 -n 4 --socket-mem 1024,1024 -a 05:02.2 \
--file-prefix=test3 -- -i --eth-peer=0,VF1_mac_address
testpmd> set fwd txonly
testpmd> start
--git a/test_plans/fortville_rss_input_test_plan.rst b/test_plans/fortville_rss_input_test_plan.rst
index a73b1b5..0202f74 100644
--- a/test_plans/fortville_rss_input_test_plan.rst
+++ b/test_plans/fortville_rss_input_test_plan.rst
@@ -54,7 +54,7 @@ Prerequisites
2.Start testpmd on host::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 81:00.0 -- -i --txq=8 --rxq=8
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -a 81:00.0 -- -i --txq=8 --rxq=8
testpmd>set verbose 1
testpmd>start
diff --git a/test_plans/generic_flow_api_test_plan.rst b/test_plans/generic_flow_api_test_plan.rst
index 760f8e8..30a5510 100644
--- a/test_plans/generic_flow_api_test_plan.rst
+++ b/test_plans/generic_flow_api_test_plan.rst
@@ -99,7 +99,7 @@ Test case: Fortville fdir for L2 payload
1. Launch the app ``testpmd`` with the following arguments::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -w 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss --pkt-filter-mode=perfect
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -a 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss --pkt-filter-mode=perfect
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
@@ -133,7 +133,7 @@ Test case: Fortville fdir for flexbytes
1. Launch the app ``testpmd`` with the following arguments::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -w 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss --pkt-filter-mode=perfect
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -a 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss --pkt-filter-mode=perfect
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
@@ -217,17 +217,17 @@ Test case: Fortville fdir for ipv4
1. Launch the app ``testpmd`` with the following arguments::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -w 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss --pkt-filter-mode=perfect
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -a 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss --pkt-filter-mode=perfect
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e0000 -n 4 -w 05:02.0 --file-prefix=vf0 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e0000 -n 4 -a 05:02.0 --file-prefix=vf0 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e00000 -n 4 -w 05:02.1 --file-prefix=vf1 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e00000 -n 4 -a 05:02.1 --file-prefix=vf1 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
@@ -322,17 +322,17 @@ Test case: Fortville fdir for ipv6
1. Launch the app ``testpmd`` with the following arguments::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -w 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss --pkt-filter-mode=perfect
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -a 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss --pkt-filter-mode=perfect
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e0000 -n 4 -w 05:02.0 --file-prefix=vf0 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e0000 -n 4 -a 05:02.0 --file-prefix=vf0 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e00000 -n 4 -w 05:02.1 --file-prefix=vf1 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e00000 -n 4 -a 05:02.1 --file-prefix=vf1 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss --pkt-filter-mode=perfect
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
@@ -401,7 +401,7 @@ Test case: Fortville fdir wrong parameters
1. Launch the app ``testpmd`` with the following arguments::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -w 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss --pkt-filter-mode=perfect
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -a 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss --pkt-filter-mode=perfect
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
@@ -461,7 +461,7 @@ Test case: Fortville tunnel vxlan
1. Launch the app ``testpmd`` with the following arguments::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -w 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --tx-offloads=0x8fff --disable-rss
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -a 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --tx-offloads=0x8fff --disable-rss
testpmd> rx_vxlan_port add 4789 0
testpmd> set fwd rxonly
testpmd> set verbose 1
@@ -469,7 +469,7 @@ Test case: Fortville tunnel vxlan
testpmd> start
the pf's mac address is 00:00:00:00:01:00
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e0000 -n 4 -w 05:02.0 --file-prefix=vf --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --tx-offloads=0x8fff --disable-rss
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e0000 -n 4 -a 05:02.0 --file-prefix=vf --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --tx-offloads=0x8fff --disable-rss
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> set promisc all off
@@ -564,19 +564,19 @@ Test case: Fortville tunnel nvgre
1. Launch the app ``testpmd`` with the following arguments::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -w 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --tx-offloads=0x8fff
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -a 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --tx-offloads=0x8fff
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> set promisc all off
testpmd> start
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e0000 -n 4 -w 05:02.0 --file-prefix=vf0 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --tx-offloads=0x8fff
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e0000 -n 4 -a 05:02.0 --file-prefix=vf0 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --tx-offloads=0x8fff
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> set promisc all off
testpmd> start
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e00000 -n 4 -w 05:02.1 --file-prefix=vf1 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --tx-offloads=0x8fff
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e00000 -n 4 -a 05:02.1 --file-prefix=vf1 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --tx-offloads=0x8fff
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> set promisc all off
@@ -816,17 +816,17 @@ Test case: IXGBE L2-tunnel(supported by x552 and x550)
1. Launch the app ``testpmd`` with the following arguments::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -w 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1ffff -n 4 -a 05:00.0 --file-prefix=pf --socket-mem=1024,1024 -- -i --rxq=16 --txq=16 --disable-rss
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e0000 -n 4 -w 05:02.0 --file-prefix=vf0 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e0000 -n 4 -a 05:02.0 --file-prefix=vf0 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e00000 -n 4 -w 05:02.1 --file-prefix=vf1 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 1e00000 -n 4 -a 05:02.1 --file-prefix=vf1 --socket-mem=1024,1024 -- -i --rxq=4 --txq=4 --disable-rss
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
@@ -1445,7 +1445,7 @@ Test case: Fortville fdir for l2 mac
./usertools/dpdk-devbind.py -b igb_uio 0000:81:00.0
launch testpmd::
- ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 -w 0000:81:00.0 -- -i --rxq=4 --txq=4
+ ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 -a 0000:81:00.0 -- -i --rxq=4 --txq=4
1. basic test for ipv4-other
diff --git a/test_plans/iavf_flexible_descriptor_test_plan.rst b/test_plans/iavf_flexible_descriptor_test_plan.rst
index ae28865..d03fbe4 100644
--- a/test_plans/iavf_flexible_descriptor_test_plan.rst
+++ b/test_plans/iavf_flexible_descriptor_test_plan.rst
@@ -129,7 +129,7 @@ VLAN cases
1. Launch testpmd by::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -w af:01.0,proto_xtr=vlan -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -a af:01.0,proto_xtr=vlan -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
testpmd>set verbose 1
testpmd>set fwd io
testpmd>start
@@ -139,7 +139,7 @@ VLAN cases
expected: RXDID[17]
.. note::
- Please change the core setting (-l option) and port's PCI (-w option) by your DUT environment
+ Please change the core setting (-l option) and port's PCI (-a option) by your DUT environment
Test Case: Check single VLAN fields in RXD (802.1Q)
---------------------------------------------------
@@ -218,7 +218,7 @@ Test steps are same to ``VLAN cases``, just change the launch command of testpmd
Launch testpmd command::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -w af:01.0,proto_xtr=ipv4 -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -a af:01.0,proto_xtr=ipv4 -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
check RXDID value correct::
@@ -244,7 +244,7 @@ Test steps are same to ``VLAN cases``, just change the launch command of testpmd
Launch testpmd command::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -w af:01.0,proto_xtr=ipv6 -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -a af:01.0,proto_xtr=ipv6 -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
check RXDID value correct::
@@ -270,7 +270,7 @@ Test steps are same to ``VLAN cases``, just change the launch command of testpmd
Launch testpmd command::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -w af:01.0,proto_xtr=ipv6_flow -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -a af:01.0,proto_xtr=ipv6_flow -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
check RXDID value correct::
@@ -294,7 +294,7 @@ Test steps are same to ``VLAN cases``, just change the launch command of testpmd
Launch testpmd command::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -w af:01.0,proto_xtr=tcp -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -a af:01.0,proto_xtr=tcp -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
check RXDID value correct::
@@ -317,7 +317,7 @@ Test steps are same to ``VLAN cases``, just change the launch command of testpmd
Launch testpmd command::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -w af:01.0,proto_xtr=tcp -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -a af:01.0,proto_xtr=tcp -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
check RXDID value correct::
@@ -340,7 +340,7 @@ Test steps are same to ``VLAN cases``, just change the launch command of testpmd
Launch testpmd command::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -w af:01.0,proto_xtr='[(2):ipv4,(3):ipv6,(4):tcp]' -- -i --rxq=16 --txq=16 --portmask=0x1
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -a af:01.0,proto_xtr='[(2):ipv4,(3):ipv6,(4):tcp]' -- -i --rxq=16 --txq=16 --portmask=0x1
check RXDID value correct::
@@ -385,13 +385,13 @@ Test steps are same to ``VLAN cases``, use different "proto_xtr" parameters the
use error parameter Launch testpmd::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -w af:01.0,proto_xtr=vxlan -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -a af:01.0,proto_xtr=vxlan -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
testpmd can't started, check "iavf_lookup_flex_desc_type(): wrong flex_desc type, it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ovs|ip_offset" in testpmd output.
don't use parameter launch testpmd::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -w af:01.0 -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -a af:01.0 -- -i --rxq=4 --txq=4 --portmask=0x1 --nb-cores=2
testpmd started, check "iavf_configure_queues(): request RXDID[16] in Queue[0]" in testpmd output
@@ -403,7 +403,7 @@ Test steps are same to ``VLAN cases``, just change the launch command of testpmd
MPLS cases use same parameter Launch testpmd::
- ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -w af:01.0,proto_xtr=ip_offset -- -i --portmask=0x1 --nb-cores=2
+ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 6-9 -n 4 -a af:01.0,proto_xtr=ip_offset -- -i --portmask=0x1 --nb-cores=2
check RXDID value correct::
diff --git a/test_plans/iavf_package_driver_error_handle_test_plan.rst b/test_plans/iavf_package_driver_error_handle_test_plan.rst
index 23978ea..fe95e29 100644
--- a/test_plans/iavf_package_driver_error_handle_test_plan.rst
+++ b/test_plans/iavf_package_driver_error_handle_test_plan.rst
@@ -78,7 +78,7 @@ Test Case 1: Check old driver and latest comms pkg compatibility
./usertools/dpdk-devbind.py -b vfio-pci 0000:b1:01.0
4. Launch the testpmd
- ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-9 -n 4 -w b1:01.0 --file-prefix=vf -- -i --rxq=16 --txq=16 --nb-cores=2
+ ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-9 -n 4 -a b1:01.0 --file-prefix=vf -- -i --rxq=16 --txq=16 --nb-cores=2
5. Create a rss rule
testpmd> flow create 0 ingress pattern eth / ipv4 / end actions rss types l3-dst-only end key_len 0 queues end / end
diff --git a/test_plans/iavf_test_plan.rst b/test_plans/iavf_test_plan.rst
index 4855e17..f79214d 100644
--- a/test_plans/iavf_test_plan.rst
+++ b/test_plans/iavf_test_plan.rst
@@ -425,7 +425,7 @@ create 2 VFs from 1 PF, and start PF::
echo 2 > /sys/bus/pci/devices/0000\:08\:00.0/max_vfs;
./usertools/dpdk-devbind.py --bind=vfio-pci 09:02.0 09:0a.0
- ./x86_64-native-linuxapp-gcc/app/testpmd -l 1,2 -n 4 --socket-mem=1024,1024 --file-prefix=pf -w 08:00.0 -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -l 1,2 -n 4 --socket-mem=1024,1024 --file-prefix=pf -a 08:00.0 -- -i
testpmd>set vf mac addr 0 0 00:12:34:56:78:01
testpmd>set vf mac addr 0 1 00:12:34:56:78:02
@@ -433,7 +433,7 @@ create 2 VFs from 1 PF, and start PF::
start testpmd with 2VFs individually::
./x86_64-native-linuxapp-gcc/app/testpmd -l 3-5 -n 4 --master-lcore=3 --socket-mem=1024,1024 --file-prefix=vf1 \
- -w 09:02.0 -- -i --txq=2 --rxq=2 --rxd=512 --txd=512 --nb-cores=2 --rss-ip --eth-peer=0,00:12:34:56:78:02
+ -a 09:02.0 -- -i --txq=2 --rxq=2 --rxd=512 --txd=512 --nb-cores=2 --rss-ip --eth-peer=0,00:12:34:56:78:02
testpmd>set promisc all off
testpmd>set fwd mac
@@ -442,7 +442,7 @@ start testpmd with 2VFs individually::
::
./x86_64-native-linuxapp-gcc/app/testpmd -l 6-8 -n 4 --master-lcore=6 --socket-mem=1024,1024 --file-prefix=vf2 \
- -w 09:0a.0 -- -i --txq=2 --rxq=2 --rxd=512 --txd=512 --nb-cores=2 --rss-ip
+ -a 09:0a.0 -- -i --txq=2 --rxq=2 --rxd=512 --txd=512 --nb-cores=2 --rss-ip
testpmd>set promisc all off
testpmd>set fwd mac
@@ -461,7 +461,7 @@ Test Case: vector vf performance
2. start testpmd for PF::
./x86_64-native-linuxapp-gcc/app/testpmd -c 0x6 -n 4 --socket-mem=1024,1024 --file-prefix=pf \
- -w 08:00.0 -w 08:00.1 -- -i
+ -a 08:00.0 -a 08:00.1 -- -i
testpmd>set vf mac addr 0 0 00:12:34:56:78:01
testpmd>set vf mac addr 1 0 00:12:34:56:78:02
@@ -469,7 +469,7 @@ Test Case: vector vf performance
3. start testpmd for VF::
./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f8 -n 4 --master-lcore=3 --socket-mem=1024,1024 --file-prefix=vf \
- -w 09:0a.0 -w 09:02.0 -- -i --txq=2 --rxq=2 --rxd=512 --txd=512 --nb-cores=4 --rss-ip
+ -a 09:0a.0 -a 09:02.0 -- -i --txq=2 --rxq=2 --rxd=512 --txd=512 --nb-cores=4 --rss-ip
testpmd>set promisc all off
testpmd>set fwd mac
diff --git a/test_plans/inline_ipsec_test_plan.rst b/test_plans/inline_ipsec_test_plan.rst
index 3a26e18..11dfeda 100644
--- a/test_plans/inline_ipsec_test_plan.rst
+++ b/test_plans/inline_ipsec_test_plan.rst
@@ -144,7 +144,7 @@ Test Case: IPSec Encryption
===========================
Start ipsec-secgw with two 82599 ports and assign port 1 to unprotected mode::
- sudo ./build/ipsec-secgw -l 20,21 -w 83:00.0 -w 83:00.1 --vdev
+ sudo ./build/ipsec-secgw -l 20,21 -a 83:00.0 -a 83:00.1 --vdev
"crypto_null" --log-level 8 --socket-mem 1024,1 -- -p 0xf -P -u
0x2 --config="(0,0,20),(1,0,21)" -f ./enc.cfg
@@ -194,7 +194,7 @@ Test Case: IPSec Encryption with Jumboframe
===========================================
Start ipsec-secgw with two 82599 ports and assign port 1 to unprotected mode::
- sudo ./build/ipsec-secgw -l 20,21 -w 83:00.0 -w 83:00.1 --vdev
+ sudo ./build/ipsec-secgw -l 20,21 -a 83:00.0 -a 83:00.1 --vdev
"crypto_null" --log-level 8 --socket-mem 1024,1 -- -p 0xf -P -u
0x2 --config="(0,0,20),(1,0,21)" -f ./enc.cfg
@@ -214,7 +214,7 @@ Check burst esp packets can't be received from unprotected port.
Set jumbo frames size as 9000, start it with port 1 assigned to unprotected mode::
- sudo ./build/ipsec-secgw -l 20,21 -w 83:00.0 -w 83:00.1 --vdev
+ sudo ./build/ipsec-secgw -l 20,21 -a 83:00.0 -a 83:00.1 --vdev
"crypto_null" --log-level 8 --socket-mem 1024,1 -- -p 0xf -P -u
0x2 -j 9000 --config="(0,0,20),(1,0,21)" -f ./enc.cfg
@@ -239,7 +239,7 @@ Create configuration file with multiple SP/SA/RT rules for different ip address.
Start ipsec-secgw with two queues enabled on each port and port 1 assigned to unprotected mode::
- sudo ./build/ipsec-secgw -l 20,21 -w 83:00.0 -w 83:00.1 --vdev
+ sudo ./build/ipsec-secgw -l 20,21 -a 83:00.0 -a 83:00.1 --vdev
"crypto_null" --log-level 8 --socket-mem 1024,1 -- -p 0xf -P -u
0x2 --config="(0,0,20),(0,1,20),(1,0,21),(1,1,21)" -f ./enc_rss.cfg
@@ -259,7 +259,7 @@ Test Case: IPSec Decryption
===========================
Start ipsec-secgw with two 82599 ports and assign port 1 to unprotected mode::
- sudo ./build/ipsec-secgw -l 20,21 -w 83:00.0 -w 83:00.1 --vdev
+ sudo ./build/ipsec-secgw -l 20,21 -a 83:00.0 -a 83:00.1 --vdev
"crypto_null" --log-level 8 --socket-mem 1024,1 -- -p 0xf -P -u
0x2 --config="(0,0,20),(1,0,21)" -f ./dec.cfg
@@ -275,7 +275,7 @@ Test Case: IPSec Decryption with wrong key
==========================================
Start ipsec-secgw with two 82599 ports and assign port 1 to unprotected mode::
- sudo ./build/ipsec-secgw -l 20,21 -w 83:00.0 -w 83:00.1 --vdev
+ sudo ./build/ipsec-secgw -l 20,21 -a 83:00.0 -a 83:00.1 --vdev
"crypto_null" --log-level 8 --socket-mem 1024,1 -- -p 0xf -P -u
0x2 --config="(0,0,20),(1,0,21)" -f ./dec.cfg
@@ -295,7 +295,7 @@ IPsec application will produce error "IPSEC_ESP: failed crypto op".
Test Case: IPSec Decryption with Jumboframe
===========================================
Start ipsec-secgw with two 82599 ports and assign port 1 to unprotected mode::
- sudo ./build/ipsec-secgw -l 20,21 -w 83:00.0 -w 83:00.1 --vdev
+ sudo ./build/ipsec-secgw -l 20,21 -a 83:00.0 -a 83:00.1 --vdev
"crypto_null" --log-level 8 --socket-mem 1024,1 -- -p 0xf -P -u
0x2 --config="(0,0,20),(1,0,21)" -f ./dec.cfg
@@ -312,7 +312,7 @@ Check burst(8192) packets which have been decapsulated can't be received from pr
Set jumbo frames size as 9000, start it with port 1 assigned to unprotected mode::
- sudo ./build/ipsec-secgw -l 20,21 -w 83:00.0 -w 83:00.1 --vdev
+ sudo ./build/ipsec-secgw -l 20,21 -a 83:00.0 -a 83:00.1 --vdev
"crypto_null" --log-level 8 --socket-mem 1024,1 -- -p 0xf -P -u
0x2 -j 9000 --config="(0,0,20),(1,0,21)" -f ./dec.cfg
@@ -334,8 +334,8 @@ Create configuration file with multiple SA rule for different ip address.
Start ipsec-secgw with two 82599 ports and assign port 1 to unprotected mode::
- sudo ./build/ipsec-secgw -l 20,21 -w 83:00.0 -w 83:00.1 --vdev
- "crypto_null" --log-level 8 --socket-mem 1024,1 -- -p 0xf -P -u
+ sudo ./build/ipsec-secgw -l 20,21 -a 83:00.0 -a 83:00.1 --vdev
+ "crypto_null" --log-level 8 --socket-mem 1024,1 -- -p 0xf -P -u
0x2 -config="(0,0,20),(0,1,20),(1,0,21),(1,1,21)" -f ./dec_rss.cfg
Send two burst(32) esp packets with different ip to unprotected port.
@@ -351,7 +351,7 @@ Test Case: IPSec Encryption/Decryption simultaneously
=====================================================
Start ipsec-secgw with two 82599 ports and assign port 1 to unprotected mode::
- sudo ./build/ipsec-secgw -l 20,21 -w 83:00.0 -w 83:00.1
+ sudo ./build/ipsec-secgw -l 20,21 -a 83:00.0 -a 83:00.1
--vdev "crypto_null" --log-level 8 --socket-mem 1024,1
-- -p 0xf -P -u 0x2 --config="(0,0,20),(1,0,21)" -f ./enc_dec.cfg
diff --git a/test_plans/ip_pipeline_test_plan.rst b/test_plans/ip_pipeline_test_plan.rst
index 0b6bb5b..1c774e3 100644
--- a/test_plans/ip_pipeline_test_plan.rst
+++ b/test_plans/ip_pipeline_test_plan.rst
@@ -178,7 +178,7 @@ Test Case: traffic management pipeline
3. Run ip_pipeline app as the following::
- ./build/ip_pipeline -c 0x3 -n 4 -w 0000:81:00.0 -- -s examples/traffic_manager.cli
+ ./build/ip_pipeline -c 0x3 -n 4 -a 0000:81:00.0 -- -s examples/traffic_manager.cli
4. Config traffic with dst ipaddr increase from 0.0.0.0 to 15.255.0.0, total 4096 streams,
also config flow tracked-by dst ipaddr, verify each flow's throughput is about linerate/4096.
@@ -220,7 +220,7 @@ Test Case: vf l2fwd pipeline(pf bound to dpdk driver)
2. Start testpmd with the four pf ports::
- ./testpmd -c 0xf0 -n 4 -w 05:00.0 -w 05:00.1 -w 05:00.2 -w 05:00.3 --file-prefix=pf --socket-mem 1024,1024 -- -i
+ ./testpmd -c 0xf0 -n 4 -a 05:00.0 -a 05:00.1 -a 05:00.2 -a 05:00.3 --file-prefix=pf --socket-mem 1024,1024 -- -i
Set vf mac address from pf port::
@@ -235,8 +235,8 @@ Test Case: vf l2fwd pipeline(pf bound to dpdk driver)
4. Run ip_pipeline app as the following::
- ./build/ip_pipeline -c 0x3 -n 4 -w 0000:05:02.0 -w 0000:05:06.0 \
- -w 0000:05:0a.0 -w 0000:05:0e.0 --file-prefix=vf --socket-mem 1024,1024 -- -s examples/vf.cli
+ ./build/ip_pipeline -c 0x3 -n 4 -a 0000:05:02.0 -a 0000:05:06.0 \
+ -a 0000:05:0a.0 -a 0000:05:0e.0 --file-prefix=vf --socket-mem 1024,1024 -- -s examples/vf.cli
The exact format of port allowlist: domain:bus:devid:func
@@ -331,7 +331,7 @@ Test Case: crypto pipeline - AEAD algorithm in aesni_gcm
4. Run ip_pipeline app as the following::
- ./examples/ip_pipeline/build/ip_pipeline -w 0000:81:00.0 --vdev crypto_aesni_gcm0
+ ./examples/ip_pipeline/build/ip_pipeline -a 0000:81:00.0 --vdev crypto_aesni_gcm0
--socket-mem 0,2048 -l 23,24,25 -- -s ./examples/ip_pipeline/examples/flow_crypto.cli
5. Send packets with IXIA port,
@@ -365,7 +365,7 @@ Test Case: crypto pipeline - cipher algorithm in aesni_mb
4. Run ip_pipeline app as the following::
- ./examples/ip_pipeline/build/ip_pipeline -w 0000:81:00.0 --vdev crypto_aesni_mb0 --socket-mem 0,2048 -l 23,24,25 -- -s ./examples/ip_pipeline/examples/flow_crypto.cli
+ ./examples/ip_pipeline/build/ip_pipeline -a 0000:81:00.0 --vdev crypto_aesni_mb0 --socket-mem 0,2048 -l 23,24,25 -- -s ./examples/ip_pipeline/examples/flow_crypto.cli
5. Send packets with IXIA port,
Use a tool to caculate the ciphertext from plaintext and key as an expected value.
@@ -395,7 +395,7 @@ Test Case: crypto pipeline - cipher_auth algorithm in aesni_mb
4. Run ip_pipeline app as the following::
- ./examples/ip_pipeline/build/ip_pipeline -w 0000:81:00.0 --vdev crypto_aesni_mb0 --socket-mem 0,2048 -l 23,24,25 -- -s ./examples/ip_pipeline/examples/flow_crypto.cli
+ ./examples/ip_pipeline/build/ip_pipeline -a 0000:81:00.0 --vdev crypto_aesni_mb0 --socket-mem 0,2048 -l 23,24,25 -- -s ./examples/ip_pipeline/examples/flow_crypto.cli
5. Send packets with IXIA port,
Use a tool to caculate the ciphertext from plaintext and cipher key with AES-CBC algorithm.
diff --git a/test_plans/ipsec_gw_and_library_test_plan.rst b/test_plans/ipsec_gw_and_library_test_plan.rst
index fac2a7b..74bf407 100644
--- a/test_plans/ipsec_gw_and_library_test_plan.rst
+++ b/test_plans/ipsec_gw_and_library_test_plan.rst
@@ -202,7 +202,7 @@ Cryptodev AES-NI algorithm validation matrix is showed in table below.
AESNI_MB device start cmd::
- ./examples/ipsec-secgw/build/ipsec-secgw --socket-mem 2048,0 --legacy-mem -w 0000:60:00.0
+ ./examples/ipsec-secgw/build/ipsec-secgw --socket-mem 2048,0 --legacy-mem -a 0000:60:00.0
--vdev=net_tap0,mac=fixed --vdev crypto_aesni_mb_pmd_1 --vdev=crypto_aesni_mb_pmd_2 -l 9,10,11 -n 6 -- -P --config "(0,0,10),(1,0,11)"
-u 0x1 -p 0x3 -f /root/dts/local_conf/ipsec_test.cfg
@@ -230,8 +230,8 @@ Cryptodev QAT algorithm validation matrix is showed in table below.
QAT device start cmd::
- ./examples/ipsec-secgw/build/ipsec-secgw --socket-mem 2048,0 --legacy-mem --vdev=net_tap0,mac=fixed -w 0000:60:00.0
- -w 0000:1a:01.0 -l 9,10,11 -n 6 -- -P --config "(0,0,10),(1,0,11)" -u 0x1 -p 0x3
+ ./examples/ipsec-secgw/build/ipsec-secgw --socket-mem 2048,0 --legacy-mem --vdev=net_tap0,mac=fixed -a 0000:60:00.0
+ -a 0000:1a:01.0 -l 9,10,11 -n 6 -- -P --config "(0,0,10),(1,0,11)" -u 0x1 -p 0x3
-f /root/dts/local_conf/ipsec_test.cfg
AES_GCM_PMD algorithm validation matrix is showed in table below.
@@ -244,7 +244,7 @@ AES_GCM_PMD algorithm validation matrix is showed in table below.
AESNI_GCM device start cmd::
- ./examples/ipsec-secgw/build/ipsec-secgw --socket-mem 2048,0 --legacy-mem -w 0000:60:00.0 --vdev=net_tap0,mac=fixed
+ ./examples/ipsec-secgw/build/ipsec-secgw --socket-mem 2048,0 --legacy-mem -a 0000:60:00.0 --vdev=net_tap0,mac=fixed
--vdev crypto_aesni_gcm_pmd_1 --vdev=crypto_aesni_gcm_pmd_2 -l 9,10,11 -n 6 -- -P --config "(0,0,10),(1,0,11)"
-u 0x1 -p 0x3 -f /root/dts/local_conf/ipsec_test.cfg
diff --git a/test_plans/l2tp_esp_coverage_test_plan.rst b/test_plans/l2tp_esp_coverage_test_plan.rst
index 4998f0f..a768684 100644
--- a/test_plans/l2tp_esp_coverage_test_plan.rst
+++ b/test_plans/l2tp_esp_coverage_test_plan.rst
@@ -88,7 +88,7 @@ Test Case 1: test MAC_IPV4_L2TPv3 HW checksum offload
1. DUT enable rx checksum with "--enable-rx-cksum" when start testpmd::
- ./x86_64-native-linuxapp-gcc/app/testpmd -n 4 -w af:01.0 -- -i --enable-rx-cksum
+ ./x86_64-native-linuxapp-gcc/app/testpmd -n 4 -a af:01.0 -- -i --enable-rx-cksum
2. DUT setup csum forwarding mode::
@@ -163,7 +163,7 @@ Test Case 2: test MAC_IPV4_ESP HW checksum offload
1. DUT enable rx checksum with "--enable-rx-cksum" when start testpmd, setup csum forwarding mode::
- ./x86_64-native-linuxapp-gcc/app/testpmd -n 4 -w af:01.0 -- -i --enable-rx-cksum
+ ./x86_64-native-linuxapp-gcc/app/testpmd -n 4 -a af:01.0 -- -i --enable-rx-cksum
2. DUT setup csum forwarding mode::
@@ -1095,7 +1095,7 @@ Test Case 14: MAC_IPV4_L2TPv3 vlan strip on + HW checksum offload check
The pre-steps are as l2tp_esp_iavf_test_plan.
-1. ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-9 -n 4 -w af:01.0 -- -i --rxq=16 --txq=16 --portmask=0x1 --nb-cores=2 --enable-rx-cksum
+1. ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-9 -n 4 -a af:01.0 -- -i --rxq=16 --txq=16 --portmask=0x1 --nb-cores=2 --enable-rx-cksum
2. DUT create fdir rules for MAC_IPV4_L2TPv3 with queue index and mark::
@@ -1189,7 +1189,7 @@ The pre-steps are as l2tp_esp_iavf_test_plan.
Test Case 15: MAC_IPV4_L2TPv3 vlan insert on + SW checksum offload check
========================================================================
-1. ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-9 -n 4 -w af:01.0 -- -i --rxq=16 --txq=16 --portmask=0x1 --nb-cores=2 --enable-rx-cksum
+1. ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-9 -n 4 -a af:01.0 -- -i --rxq=16 --txq=16 --portmask=0x1 --nb-cores=2 --enable-rx-cksum
2. DUT create fdir rules for MAC_IPV4_L2TPv3 with queue index and mark::
@@ -1279,7 +1279,7 @@ Test Case 16: MAC_IPV4_ESP vlan strip on + HW checksum offload check
The pre-steps are as l2tp_esp_iavf_test_plan.
-1. ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-9 -n 4 -w af:01.0 -- -i --rxq=16 --txq=16 --portmask=0x1 --nb-cores=2 --enable-rx-cksum
+1. ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-9 -n 4 -a af:01.0 -- -i --rxq=16 --txq=16 --portmask=0x1 --nb-cores=2 --enable-rx-cksum
2. DUT create fdir rules for MAC_IPV4_ESP with queue index and mark::
@@ -1372,7 +1372,7 @@ The pre-steps are as l2tp_esp_iavf_test_plan.
Test Case 17: MAC_IPV6_NAT-T-ESP vlan insert on + SW checksum offload check
===========================================================================
-1. ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-9 -n 4 -w af:01.0 -- -i --rxq=16 --txq=16 --portmask=0x1 --nb-cores=2 --enable-rx-cksum
+1. ./x86_64-native-linuxapp-gcc/app/testpmd -l 6-9 -n 4 -a af:01.0 -- -i --rxq=16 --txq=16 --portmask=0x1 --nb-cores=2 --enable-rx-cksum
2. DUT create fdir rules for MAC_IPV6_NAT-T-ESP with queue index and mark::
diff --git a/test_plans/linux_modules_test_plan.rst b/test_plans/linux_modules_test_plan.rst
index 3f286ab..57b0327 100644
--- a/test_plans/linux_modules_test_plan.rst
+++ b/test_plans/linux_modules_test_plan.rst
@@ -80,7 +80,7 @@ Bind the interface to the driver ::
Start testpmd in a loop configuration ::
- # x86_64-native-linux-gcc/app/testpmd -l 1,2 -n 4 -w xxxx:xx:xx.x \
+ # x86_64-native-linux-gcc/app/testpmd -l 1,2 -n 4 -a xxxx:xx:xx.x \
-- -i --port-topology=loop
Start packet forwarding ::
@@ -122,7 +122,7 @@ Grant permissions for all users to access the new character device ::
Start testpmd in a loop configuration ::
- $ x86_64-native-linux-gcc/app/testpmd -l 1,2 -n 4 -w xxxx:xx:xx.x --in-memory \
+ $ x86_64-native-linux-gcc/app/testpmd -l 1,2 -n 4 -a xxxx:xx:xx.x --in-memory \
-- -i --port-topology=loop
Start packet forwarding ::
diff --git a/test_plans/macsec_for_ixgbe_test_plan.rst b/test_plans/macsec_for_ixgbe_test_plan.rst
index 997f921..660c2fd 100644
--- a/test_plans/macsec_for_ixgbe_test_plan.rst
+++ b/test_plans/macsec_for_ixgbe_test_plan.rst
@@ -113,7 +113,7 @@ Test Case 1: MACsec packets send and receive
1. Start the testpmd of rx port::
- ./testpmd -c 0xf --socket-mem 1024,0 --file-prefix=rx -w 0000:07:00.1 \
+ ./testpmd -c 0xf --socket-mem 1024,0 --file-prefix=rx -a 0000:07:00.1 \
-- -i --port-topology=chained
2. Set MACsec offload on::
@@ -150,7 +150,7 @@ Test Case 1: MACsec packets send and receive
1. Start the testpmd of tx port::
- ./testpmd -c 0xf0 --socket-mem 1024,0 --file-prefix=tx -w 0000:07:00.0 \
+ ./testpmd -c 0xf0 --socket-mem 1024,0 --file-prefix=tx -a 0000:07:00.0 \
-- -i --port-topology=chained
2. Set MACsec offload on::
@@ -422,7 +422,7 @@ Test Case 7: performance test of MACsec offload packets
with cable, connect 05:00.0 to IXIA. Bind the three ports to dpdk driver.
Start two testpmd::
- ./testpmd -c 0xf --socket-mem 1024,0 --file-prefix=rx -w 0000:07:00.1 \
+ ./testpmd -c 0xf --socket-mem 1024,0 --file-prefix=rx -a 0000:07:00.1 \
-- -i --port-topology=chained
testpmd> set macsec offload 0 on encrypt on replay-protect on
diff --git a/test_plans/malicious_driver_event_indication_test_plan.rst b/test_plans/malicious_driver_event_indication_test_plan.rst
index dfeb783..1c9d244 100644
--- a/test_plans/malicious_driver_event_indication_test_plan.rst
+++ b/test_plans/malicious_driver_event_indication_test_plan.rst
@@ -62,10 +62,10 @@ Test Case1: Check log output when malicious driver events is detected
echo 1 > /sys/bus/pci/devices/0000\:18\:00.1/max_vfs
2. Launch PF by testpmd
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x03 -n 4 --file-prefix=test1 -w [pci of PF] -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x03 -n 4 --file-prefix=test1 -a [pci of PF] -- -i
3. Launch VF by testpmd
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x03 -n 4 --file-prefix=lei1 -w [pci of VF] -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x03 -n 4 --file-prefix=lei1 -a [pci of VF] -- -i
> set fwd txonly
> start
@@ -83,14 +83,14 @@ Test Case2: Check the event counter number for malicious driver events
echo 1 > /sys/bus/pci/devices/0000\:18\:00.1/max_vfs
2. Launch PF by testpmd
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x03 -n 4 --file-prefix=test1 -w [pci of PF] -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x03 -n 4 --file-prefix=test1 -a [pci of PF] -- -i
3. launch VF by testpmd and start txonly mode 3 times:
repeat following step 3 times
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x03 -n 4 --file-prefix=lei1 -w [pci of VF] -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x03 -n 4 --file-prefix=lei1 -a [pci of VF] -- -i
> set fwd txonly
> start
> quit
4. Check the PF can detect the malicious driver events number directly in the log:
- i40e_handle_mdd_event(): TX driver issue detected on VF 0 3times
\ No newline at end of file
+ i40e_handle_mdd_event(): TX driver issue detected on VF 0 3 times
diff --git a/test_plans/pmd_test_plan.rst b/test_plans/pmd_test_plan.rst
index 4017a4a..58be7d5 100644
--- a/test_plans/pmd_test_plan.rst
+++ b/test_plans/pmd_test_plan.rst
@@ -105,7 +105,7 @@ Test Case: Packet Checking in scalar mode
The linuxapp is started with the following parameters:
::
- -c 0x6 -n 4 -w <devid>,scalar_enable=1 -- -i --portmask=<portmask>
+ -c 0x6 -n 4 -a <devid>,scalar_enable=1 -- -i --portmask=<portmask>
This test is applicable for Marvell devices. The tester sends 1 packet at a
diff --git a/test_plans/port_representor_test_plan.rst b/test_plans/port_representor_test_plan.rst
index c54b7ec..5f6ff1c 100644
--- a/test_plans/port_representor_test_plan.rst
+++ b/test_plans/port_representor_test_plan.rst
@@ -59,13 +59,13 @@ Create two VFs and two VFs representor ports which are used as control plane.
4. start a testpmd with create 2 VFs representor ports as control plane named testpmd-pf::
- ./testpmd --lcores 1,2 -n 4 -w af:00.0,representor=0-1 --socket-mem 1024,1024 \
+ ./testpmd --lcores 1,2 -n 4 -a af:00.0,representor=0-1 --socket-mem 1024,1024 \
--proc-type auto --file-prefix testpmd-pf -- -i --port-topology=chained
5. start two testpmd as dataplane named testpmd-vf0/testpmd-vf1(case 3 run later)::
- ./testpmd --lcores 3,4 -n 4 -w af:02.0 --socket-mem 1024,1024 --proc-type auto --file-prefix testpmd-vf0 -- -i
- ./testpmd --lcores 5,6 -n 4 -w af:02.1 --socket-mem 1024,1024 --proc-type auto --file-prefix testpmd-vf1 -- -i
+ ./testpmd --lcores 3,4 -n 4 -a af:02.0 --socket-mem 1024,1024 --proc-type auto --file-prefix testpmd-vf0 -- -i
+ ./testpmd --lcores 5,6 -n 4 -a af:02.1 --socket-mem 1024,1024 --proc-type auto --file-prefix testpmd-vf1 -- -i
Note: Every case needs to restart testpmd.
diff --git a/test_plans/qinq_filter_test_plan.rst b/test_plans/qinq_filter_test_plan.rst
index bd4e284..7b0a8d1 100644
--- a/test_plans/qinq_filter_test_plan.rst
+++ b/test_plans/qinq_filter_test_plan.rst
@@ -134,7 +134,7 @@ Test Case 3: qinq packet filter to VF queues
#. set up testpmd with fortville PF NICs::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x1f -n 4 --socket-mem=1024,1024 --file-prefix=pf -w 81:00.0 -- -i --rxq=4 --txq=4
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x1f -n 4 --socket-mem=1024,1024 --file-prefix=pf -a 81:00.0 -- -i --rxq=4 --txq=4
#. enable qinq::
@@ -160,7 +160,7 @@ Test Case 3: qinq packet filter to VF queues
#. set up testpmd with fortville VF0 NICs::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x3e0 -n 4 --socket-mem=1024,1024 --file-prefix=vf0 -w 81:02.0 -- -i --rxq=4 --txq=4
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x3e0 -n 4 --socket-mem=1024,1024 --file-prefix=vf0 -a 81:02.0 -- -i --rxq=4 --txq=4
#. PMD fwd only receive the packets::
@@ -176,7 +176,7 @@ Test Case 3: qinq packet filter to VF queues
#. set up testpmd with fortville VF1 NICs::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x7c0 -n 4 --socket-mem=1024,1024 --file-prefix=vf1 -w 81:02.1 -- -i --rxq=4 --txq=4
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x7c0 -n 4 --socket-mem=1024,1024 --file-prefix=vf1 -a 81:02.1 -- -i --rxq=4 --txq=4
#. PMD fwd only receive the packets::
@@ -211,7 +211,7 @@ Test Case 4: qinq packet filter with different tpid
#. set up testpmd with fortville PF NICs::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x1f -n 4 --socket-mem=1024,1024 --file-prefix=pf -w 81:00.0 -- -i --rxq=4 --txq=4
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x1f -n 4 --socket-mem=1024,1024 --file-prefix=pf -a 81:00.0 -- -i --rxq=4 --txq=4
#. enable qinq::
@@ -241,7 +241,7 @@ Test Case 4: qinq packet filter with different tpid
#. set up testpmd with fortville VF0 NICs::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x3e0 -n 4 --socket-mem=1024,1024 --file-prefix=vf0 -w 81:02.0 -- -i --rxq=4 --txq=4
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x3e0 -n 4 --socket-mem=1024,1024 --file-prefix=vf0 -a 81:02.0 -- -i --rxq=4 --txq=4
#. PMD fwd only receive the packets::
@@ -257,7 +257,7 @@ Test Case 4: qinq packet filter with different tpid
#. set up testpmd with fortville VF1 NICs::
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x7c0 -n 4 --socket-mem=1024,1024 --file-prefix=vf1 -w 81:02.1 -- -i --rxq=4 --txq=4
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x7c0 -n 4 --socket-mem=1024,1024 --file-prefix=vf1 -a 81:02.1 -- -i --rxq=4 --txq=4
#. PMD fwd only receive the packets::
diff --git a/test_plans/runtime_vf_queue_number_maxinum_test_plan.rst b/test_plans/runtime_vf_queue_number_maxinum_test_plan.rst
index 1e287e4..333b993 100644
--- a/test_plans/runtime_vf_queue_number_maxinum_test_plan.rst
+++ b/test_plans/runtime_vf_queue_number_maxinum_test_plan.rst
@@ -108,7 +108,7 @@ Test case 1: VF consume max queue number on one PF port
================================================================
1. Start the PF testpmd::
- ./testpmd -c f -n 4 -w 05:00.0 --file-prefix=test1 \
+ ./testpmd -c f -n 4 -a 05:00.0 --file-prefix=test1 \
--socket-mem 1024,1024 -- -i
2. Start the two testpmd to consume maximum queues::
@@ -120,10 +120,10 @@ Test case 1: VF consume max queue number on one PF port
The driver will alloc queues as power of 2, and queue must be equal or less than 16,
so the second VF testpmd can only start '--rxq=8 --txq=8'::
- ./testpmd -c 0xf0 -n 4 -w 05:02.0 -w 05:02.1 -w 05:02.2 -w... --file-prefix=test2 \
+ ./testpmd -c 0xf0 -n 4 -a 05:02.0 -a 05:02.1 -a 05:02.2 -a... --file-prefix=test2 \
--socket-mem 1024,1024 -- -i --rxq=16 --txq=16
- ./testpmd -c 0xf00 -n 4 -w 05:05.7 --file-prefix=test3 \
+ ./testpmd -c 0xf00 -n 4 -a 05:05.7 --file-prefix=test3 \
--socket-mem 1024,1024 -- -i --rxq=8 --txq=8
Check the Max possible RX queues and TX queues of the two VFs are both 16::
@@ -154,7 +154,7 @@ Test case 2: set max queue number per vf on one pf port
As the feature description describe, the max value of queue-num-per-vf is 8
for Both two and four ports Fortville NIC::
- ./testpmd -c f -n 4 -w 05:00.0,queue-num-per-vf=16 --file-prefix=test1 \
+ ./testpmd -c f -n 4 -a 05:00.0,queue-num-per-vf=16 --file-prefix=test1 \
--socket-mem 1024,1024 -- -i
PF port failed to started with "i40e_pf_parameter_init():
diff --git a/test_plans/runtime_vf_queue_number_test_plan.rst b/test_plans/runtime_vf_queue_number_test_plan.rst
index cf07619..9d4f953 100644
--- a/test_plans/runtime_vf_queue_number_test_plan.rst
+++ b/test_plans/runtime_vf_queue_number_test_plan.rst
@@ -41,7 +41,7 @@ the VF queue number at runtime.
Since DPDK 19.02, VF is able to request max to 16 queues and the PF EAL
parameter 'queue-num-per-vf' is redefined as the number of reserved queue
per VF. For example, if the PCI address of an i40e PF is aaaa:bb.cc,
-with the EAL parameter -w aaaa:bb.cc,queue-num-per-vf=8, the number of
+with the EAL parameter -a aaaa:bb.cc,queue-num-per-vf=8, the number of
reserved queue per VF created from this PF is 8. The valid values of
queue-num-per-vf includes 1,2,4,8,16, if the value of queue-num-per-vf
is invalid, it is set as 4 forcibly, if there is no queue-num-per-vf
@@ -130,14 +130,14 @@ Test case 1: reserve valid vf queue number
1. Start PF testpmd with random queue-num-per-vf in [1, 2, 4, 8 ,16], for example, we use 4 as the reserved vf queue numbers::
- ./testpmd -c f -n 4 -w 18:00.0,queue-num-per-vf=4 \
+ ./testpmd -c f -n 4 -a 18:00.0,queue-num-per-vf=4 \
--file-prefix=test1 --socket-mem 1024,1024 -- -i
Note testpmd can be started normally without any wrong or error.
2. Start VF testpmd::
- ./testpmd -c 0xf0 -n 4 -w 03:00.0 \
+ ./testpmd -c 0xf0 -n 4 -a 03:00.0 \
--file-prefix=test2 --socket-mem 1024,1024 -- -i
3. VF request a queue number that is equal to reserved queue number, and we can not find VF reset while configuring it::
@@ -195,7 +195,7 @@ Test case 2: reserve invalid VF queue number
1. Start PF testpmd with random queue-num-per-vf in [0, 3, 5-7 , 9-15, 17], for example, we use 0 as the reserved vf queue numbers::
- ./testpmd -c f -n 4 -w 18:00.0,queue-num-per-vf=0 \
+ ./testpmd -c f -n 4 -a 18:00.0,queue-num-per-vf=0 \
--file-prefix=test1 --socket-mem 1024,1024 -- -i
2. Verify testpmd started with logs as below::
@@ -207,12 +207,12 @@ Test case 3: set valid VF queue number in testpmd command-line options
1. Start PF testpmd::
- ./testpmd -c f -n 4 -w 18:00.0 \
+ ./testpmd -c f -n 4 -a 18:00.0 \
--file-prefix=test1 --socket-mem 1024,1024 -- -i
2. Start VF testpmd with "--rxq=[rxq] --txq=[txq]", and random valid values from 1 to 16, take 3 for example::
- ./testpmd -c 0xf0 -n 4 -w 18:02.0 --file-prefix=test2 \
+ ./testpmd -c 0xf0 -n 4 -a 18:02.0 --file-prefix=test2 \
--socket-mem 1024,1024 -- -i --rxq=3 --txq=3
3. Configure vf forwarding prerequisites and start forwarding::
@@ -254,12 +254,12 @@ Test case 4: set invalid VF queue number in testpmd command-line options
1. Start PF testpmd::
- ./testpmd -c f -n 4 -w 18:00.0 \
+ ./testpmd -c f -n 4 -a 18:00.0 \
--file-prefix=test1 --socket-mem 1024,1024 -- -i
2. Start VF testpmd with "--rxq=0 --txq=0" ::
- ./testpmd -c 0xf0 -n 4 -w 18:02.0 --file-prefix=test2 \
+ ./testpmd -c 0xf0 -n 4 -a 18:02.0 --file-prefix=test2 \
--socket-mem 1024,1024 -- -i --rxq=0 --txq=0
Verify testpmd exited with error as below::
@@ -268,7 +268,7 @@ Test case 4: set invalid VF queue number in testpmd command-line options
3. Start VF testpmd with "--rxq=17 --txq=17" ::
- ./testpmd -c 0xf0 -n 4 -w 18:02.0 --file-prefix=test2 \
+ ./testpmd -c 0xf0 -n 4 -a 18:02.0 --file-prefix=test2 \
--socket-mem 1024,1024 -- -i --rxq=17 --txq=17
Verify testpmd exited with error as below::
@@ -280,12 +280,12 @@ Test case 5: set valid VF queue number with testpmd function command
1. Start PF testpmd::
- ./testpmd -c f -n 4 -w 18:00.0 \
+ ./testpmd -c f -n 4 -a 18:00.0 \
--file-prefix=test1 --socket-mem 1024,1024 -- -i
2. Start VF testpmd without setting "rxq" and "txq"::
- ./testpmd -c 0xf0 -n 4 -w 05:02.0 --file-prefix=test2 \
+ ./testpmd -c 0xf0 -n 4 -a 05:02.0 --file-prefix=test2 \
--socket-mem 1024,1024 -- -i
3. Configure vf forwarding prerequisites and start forwarding::
@@ -307,12 +307,12 @@ Test case 6: set invalid VF queue number with testpmd function command
1. Start PF testpmd::
- ./testpmd -c f -n 4 -w 18:00.0 \
+ ./testpmd -c f -n 4 -a 18:00.0 \
--file-prefix=test1 --socket-mem 1024,1024 -- -i
2. Start VF testpmd without setting "rxq" and "txq"::
- ./testpmd -c 0xf0 -n 4 -w 05:02.0 --file-prefix=test2 \
+ ./testpmd -c 0xf0 -n 4 -a 05:02.0 --file-prefix=test2 \
--socket-mem 1024,1024 -- -i
@@ -344,7 +344,7 @@ Test case 7: Reserve VF queue number when VF bind to kernel driver
2. Reserve VF queue number ::
- ./testpmd -c f -n 4 -w 18:00.0,queue-num-per-vf=2 \
+ ./testpmd -c f -n 4 -a 18:00.0,queue-num-per-vf=2 \
--file-prefix=test1 --socket-mem 1024,1024 -- -i
3. Check the VF0 rxq and txq number is 2::
diff --git a/test_plans/unit_tests_dump_test_plan.rst b/test_plans/unit_tests_dump_test_plan.rst
index 8978fdf..c8832ff 100644
--- a/test_plans/unit_tests_dump_test_plan.rst
+++ b/test_plans/unit_tests_dump_test_plan.rst
@@ -175,7 +175,7 @@ stdout.
The steps to run the unit test manually are as follow::
# make -C ./app/test/
- # ./app/test/test -n 1 -c ffff -w|-b pci_address
+ # ./app/test/test -n 1 -c ffff -a|-b pci_address
RTE>> dump_devargs
The final output of the test will be the pci address of allow list
diff --git a/test_plans/unit_tests_event_timer_test_plan.rst b/test_plans/unit_tests_event_timer_test_plan.rst
index 58ac78c..192d983 100644
--- a/test_plans/unit_tests_event_timer_test_plan.rst
+++ b/test_plans/unit_tests_event_timer_test_plan.rst
@@ -12,7 +12,7 @@ test can be launched independently using the command line interface.
The steps to run the unit test manually are as follow::
# make -C ./app/test/
- # ./app/test/test -n 1 -c ffff -w <timerdev-pci-bus-id>,<devargs>
+ # ./app/test/test -n 1 -c ffff -a <timerdev-pci-bus-id>,<devargs>
RTE>> event_timer_adapter_test
The final output of the test has to be "Test OK"
diff --git a/test_plans/veb_switch_test_plan.rst b/test_plans/veb_switch_test_plan.rst
index e849732..ca8bd7e 100644
--- a/test_plans/veb_switch_test_plan.rst
+++ b/test_plans/veb_switch_test_plan.rst
@@ -112,7 +112,7 @@ Details:
1. In VF1, run testpmd::
./x86_64-native-linuxapp-gcc/app/testpmd -c 0x3 -n 4 --socket-mem 1024,1024
- -w 05:02.0 --file-prefix=test1 -- -i --crc-strip --eth-peer=0,00:11:22:33:44:12
+ -a 05:02.0 --file-prefix=test1 -- -i --crc-strip --eth-peer=0,00:11:22:33:44:12
testpmd>set fwd txonly
testpmd>set promisc all off
testpmd>start
@@ -120,7 +120,7 @@ Details:
In VF2, run testpmd::
./x86_64-native-linuxapp-gcc/app/testpmd -c 0xa -n 4 --socket-mem 1024,1024
- -w 05:02.1 --file-prefix=test2 -- -i --crc-strip
+ -a 05:02.1 --file-prefix=test2 -- -i --crc-strip
testpmd>set fwd rxonly
testpmd>set promisc all off
testpmd>start
@@ -140,7 +140,7 @@ Details:
1. In VF1, run testpmd::
./x86_64-native-linuxapp-gcc/app/testpmd -c 0x3 -n 4 --socket-mem 1024,1024
- -w 05:02.0 --file-prefix=test1 -- -i --crc-strip --eth-peer=0,00:11:22:33:44:12
+ -a 05:02.0 --file-prefix=test1 -- -i --crc-strip --eth-peer=0,00:11:22:33:44:12
testpmd>set fwd mac
testpmd>set promisc all off
testpmd>start
@@ -148,7 +148,7 @@ Details:
In VF2, run testpmd::
./x86_64-native-linuxapp-gcc/app/testpmd -c 0xa -n 4 --socket-mem 1024,1024
- -w 05:02.1 --file-prefix=test2 -- -i --crc-strip
+ -a 05:02.1 --file-prefix=test2 -- -i --crc-strip
testpmd>set fwd rxonly
testpmd>set promisc all off
testpmd>start
@@ -174,7 +174,7 @@ Details:
2. In VF1, run testpmd::
- ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -w 0000:05:02.0
+ ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -a 0000:05:02.0
--file-prefix=test1 -- -i --crc-strip --eth-peer=0,00:11:22:33:44:12
testpmd>set fwd mac
testpmd>set promisc all off
@@ -182,7 +182,7 @@ Details:
In VF2, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 0000:05:02.1
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 0000:05:02.1
--file-prefix=test2 -- -i --crc-strip
testpmd>set fwd rxonly
testpmd>set promisc all off
@@ -216,14 +216,14 @@ Details:
1. vf->pf
PF, launch testpmd::
- ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -w 0000:05:00.0 --file-prefix=test1 -- -i
+ ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -a 0000:05:00.0 --file-prefix=test1 -- -i
testpmd>set fwd rxonly
testpmd>set promisc all off
testpmd>start
VF1, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 0000:05:02.0 --file-prefix=test2 -- -i --eth-peer=0,pf_mac_addr
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 0000:05:02.0 --file-prefix=test2 -- -i --eth-peer=0,pf_mac_addr
testpmd>set fwd txonly
testpmd>set promisc all off
testpmd>start
@@ -234,14 +234,14 @@ Details:
2. pf->vf
PF, launch testpmd::
- ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -w 0000:05:00.0 --file-prefix=test1 -- -i --eth-peer=0,vf1_mac_addr
+ ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -a 0000:05:00.0 --file-prefix=test1 -- -i --eth-peer=0,vf1_mac_addr
testpmd>set fwd txonly
testpmd>set promisc all off
testpmd>start
VF1, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 0000:05:02.0 --file-prefix=test2 -- -i
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 0000:05:02.0 --file-prefix=test2 -- -i
testpmd>mac_addr add 0 vf1_mac_addr
testpmd>set fwd rxonly
testpmd>set promisc all off
@@ -253,14 +253,14 @@ Details:
3. tester->vf
PF, launch testpmd::
- ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -w 0000:05:00.0 --file-prefix=test1 -- -i
+ ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -a 0000:05:00.0 --file-prefix=test1 -- -i
testpmd>set fwd mac
testpmd>set promisc all off
testpmd>start
VF1, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 0000:05:02.0 --file-prefix=test2 -- -i
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 0000:05:02.0 --file-prefix=test2 -- -i
testpmd>mac_addr add 0 vf1_mac_addr
testpmd>set fwd rxonly
testpmd>set promisc all off
@@ -273,19 +273,19 @@ Details:
4. vf1->vf2
PF, launch testpmd::
- ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -w 0000:05:00.0 --file-prefix=test1 -- -i
+ ./testpmd -c 0xf -n 4 --socket-mem 1024,1024 -a 0000:05:00.0 --file-prefix=test1 -- -i
testpmd>set promisc all off
VF1, run testpmd::
- ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -w 0000:05:02.0 --file-prefix=test2 -- -i --eth-peer=0,vf2_mac_addr
+ ./testpmd -c 0xf0 -n 4 --socket-mem 1024,1024 -a 0000:05:02.0 --file-prefix=test2 -- -i --eth-peer=0,vf2_mac_addr
testpmd>set fwd txonly
testpmd>set promisc all off
testpmd>start
VF2, run testpmd::
- ./testpmd -c 0xf00 -n 4 --socket-mem 1024,1024 -w 0000:05:02.1 --file-prefix=test3 -- -i
+ ./testpmd -c 0xf00 -n 4 --socket-mem 1024,1024 -a 0000:05:02.1 --file-prefix=test3 -- -i
testpmd>mac_addr add 0 vf2_mac_addr
testpmd>set fwd rxonly
testpmd>set promisc all off
diff --git a/test_plans/vf_l3fwd_test_plan.rst b/test_plans/vf_l3fwd_test_plan.rst
index efdedda..9fb97cc 100644
--- a/test_plans/vf_l3fwd_test_plan.rst
+++ b/test_plans/vf_l3fwd_test_plan.rst
@@ -156,7 +156,7 @@ take XL710 for example::
4, Start dpdk l3fwd with 1:1 matched cores and queues::
- ./examples/l3fwd/build/l3fwd -c 0x3c -n 4 -w 0000:18:02.0 -w 0000:18:06.0 -- -p 0x3 --config '(0,0,2),(1,0,3),(0,1,4),(1,1,5)'
+ ./examples/l3fwd/build/l3fwd -c 0x3c -n 4 -a 0000:18:02.0 -a 0000:18:06.0 -- -p 0x3 --config '(0,0,2),(1,0,3),(0,1,4),(1,1,5)'
5, Send packet with frame size from 64bytes to 1518bytes with ixia traffic generator,
make sure your traffic configuration meets LPM rules, and will go to all queues, all ports.
diff --git a/test_plans/vf_macfilter_test_plan.rst b/test_plans/vf_macfilter_test_plan.rst
index c2fd298..ae2250a 100644
--- a/test_plans/vf_macfilter_test_plan.rst
+++ b/test_plans/vf_macfilter_test_plan.rst
@@ -97,7 +97,7 @@ Test Case 1: test_kernel_2pf_2vf_1vm_iplink_macfilter
disable promisc mode,set it in mac forward mode::
./usertools/dpdk-devbind.py --bind=igb_uio 00:06.0 00:07.0
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -w 00:06.0 -w 00:07.0 -- -i --portmask=0x3 --tx-offloads=0x8fff
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -a 00:06.0 -a 00:07.0 -- -i --portmask=0x3 --tx-offloads=0x8fff
testpmd> port stop all
testpmd> port config all crc-strip on
@@ -175,7 +175,7 @@ Test Case 2: test_kernel_2pf_2vf_1vm_mac_add_filter
VF, disable promisc mode, add a new MAC to VF0 and then start::
./usertools/dpdk-devbind.py --bind=igb_uio 00:06.0 00:07.0
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -w 00:06.0 -w 00:07.0 -- -i --portmask=0x3 --tx-offloads=0x8fff
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -a 00:06.0 -a 00:07.0 -- -i --portmask=0x3 --tx-offloads=0x8fff
testpmd> port stop all
testpmd> port config all crc-strip on
@@ -269,7 +269,7 @@ Test Case 3: test_dpdk_2pf_2vf_1vm_mac_add_filter
VF, disable promisc mode, add a new MAC to VF0 and then start::
./usertools/dpdk-devbind.py --bind=igb_uio 00:06.0 00:07.0
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -w 00:06.0 -w 00:07.0 -- -i --portmask=0x3 --tx-offloads=0x8fff
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -a 00:06.0 -a 00:07.0 -- -i --portmask=0x3 --tx-offloads=0x8fff
testpmd> port stop all
testpmd> port config all crc-strip on
@@ -365,7 +365,7 @@ Test Case 4: test_dpdk_2pf_2vf_1vm_iplink_macfilter
disable promisc mode, set it in mac forward mode::
./usertools/dpdk-devbind.py --bind=igb_uio 00:06.0 00:07.0
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -w 00:06.0 -w 00:07.0 -- -i --portmask=0x3 --tx-offloads=0x8fff
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -a 00:06.0 -a 00:07.0 -- -i --portmask=0x3 --tx-offloads=0x8fff
testpmd> port stop all
testpmd> port config all crc-strip on
diff --git a/test_plans/vf_packet_rxtx_test_plan.rst b/test_plans/vf_packet_rxtx_test_plan.rst
index 773758b..6c34f0b 100644
--- a/test_plans/vf_packet_rxtx_test_plan.rst
+++ b/test_plans/vf_packet_rxtx_test_plan.rst
@@ -96,7 +96,7 @@ Test Case 1: VF_packet_IO_kernel_PF_dpdk_VF
and then start testpmd, set it in mac forward mode::
./usertools/dpdk-devbind.py -s --bind=igb_uio 00:06.0 00:07.0
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -w 00:06.0 -w 00:07.0 \
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -a 00:06.0 -a 00:07.0 \
-- -i --portmask=0x3 --tx-offloads=0x8fff
testpmd> set fwd mac
@@ -165,7 +165,7 @@ Test Case 2: VF_packet_IO_dpdk_PF_dpdk_VF
and then start testpmd, set it in mac forward mode::
./usertools/dpdk-devbind.py --bind=igb_uio 00:06.0 00:07.0
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -w 00:06.0 -w 00:07.0 \
+ ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 -a 00:06.0 -a 00:07.0 \
-- -i
testpmd> set fwd mac
diff --git a/test_plans/vf_pf_reset_test_plan.rst b/test_plans/vf_pf_reset_test_plan.rst
index d433d71..009e99a 100644
--- a/test_plans/vf_pf_reset_test_plan.rst
+++ b/test_plans/vf_pf_reset_test_plan.rst
@@ -160,11 +160,11 @@ Test Case 2: vf reset -- create two vfs on one pf, run testpmd separately
2. Start testpmd on two vf ports::
./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 \
- --socket-mem 1024,1024 -w 81:02.0 --file-prefix=test1 \
+ --socket-mem 1024,1024 -a 81:02.0 --file-prefix=test1 \
-- -i --eth-peer=0,00:11:22:33:44:12 \
./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf0 -n 4 \
- --socket-mem 1024,1024 -w 81:02.1 --file-prefix=test2 \
+ --socket-mem 1024,1024 -a 81:02.1 --file-prefix=test2 \
-- -i
3. Set fwd mode on vf0::
@@ -545,7 +545,7 @@ test Case 9: vf reset (two vfs passed through to one VM)
./usertools/dpdk-devbind.py -b igb_uio 00:05.0 00:05.1
./x86_64-native-linuxapp-gcc/app/testpmd -c 0x0f -n 4 \
- -w 00:05.0 -w 00:05.1 -- -i --portmask=0x3
+ -a 00:05.0 -a 00:05.1 -- -i --portmask=0x3
5. Add MAC address to the vf0 ports, set it in mac forward mode::
diff --git a/test_plans/vf_vlan_test_plan.rst b/test_plans/vf_vlan_test_plan.rst
index 5eaa994..47b4249 100644
--- a/test_plans/vf_vlan_test_plan.rst
+++ b/test_plans/vf_vlan_test_plan.rst
@@ -87,7 +87,7 @@ Prerequisites
5. Start testpmd, set it in rxonly mode and enable verbose output::
- testpmd -c 0x0f -n 4 -w 00:04.0 -w 00:05.0 -- -i --portmask=0x3 --tx-offloads=0x8fff
+ testpmd -c 0x0f -n 4 -a 00:04.0 -a 00:05.0 -- -i --portmask=0x3 --tx-offloads=0x8fff
testpmd> set fwd rxonly
testpmd> set verbose 1
testpmd> start
diff --git a/test_plans/vhost_virtio_user_interrupt_test_plan.rst b/test_plans/vhost_virtio_user_interrupt_test_plan.rst
index 42e645f..2ac6a38 100644
--- a/test_plans/vhost_virtio_user_interrupt_test_plan.rst
+++ b/test_plans/vhost_virtio_user_interrupt_test_plan.rst
@@ -206,7 +206,7 @@ flow: Vhost <--> Virtio
1. Bind one cbdma port to igb_uio driver, then start vhost-user side::
- ./testpmd -c 0x3000 -n 4 -w 00:04.0 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net,queues=1,client=0,dmas=[txq0@00:04.0]' -- -i
+ ./testpmd -c 0x3000 -n 4 -a 00:04.0 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net,queues=1,client=0,dmas=[txq0@00:04.0]' -- -i
testpmd>set fwd mac
testpmd>start
@@ -254,7 +254,7 @@ flow: Vhost <--> Virtio
1. Bind one cbdma port to igb_uio driver, then start vhost-user side::
- ./testpmd -c 0x3000 -n 4 -w 00:04.0 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net,queues=1,client=0,dmas=[txq0@00:04.0]' -- -i
+ ./testpmd -c 0x3000 -n 4 -a 00:04.0 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net,queues=1,client=0,dmas=[txq0@00:04.0]' -- -i
testpmd>set fwd mac
testpmd>start
diff --git a/test_plans/virtio_pvp_regression_test_plan.rst b/test_plans/virtio_pvp_regression_test_plan.rst
index df76b54..fb45c56 100644
--- a/test_plans/virtio_pvp_regression_test_plan.rst
+++ b/test_plans/virtio_pvp_regression_test_plan.rst
@@ -150,7 +150,7 @@ Test Case 3: pvp test with virtio 0.95 vrctor_rx path
3. On VM, bind virtio net to igb_uio and run testpmd without tx-offloads, [0000:xx.00] is [Bus,Device,Function] of virtio-net::
- ./testpmd -c 0x7 -n 3 -w 0000:xx.00,vectorized -- -i \
+ ./testpmd -c 0x7 -n 3 -a 0000:xx.00,vectorized -- -i \
--nb-cores=2 --rxq=2 --txq=2 --txd=1024 --rxd=1024
testpmd>set fwd mac
testpmd>start
@@ -267,7 +267,7 @@ Test Case 6: pvp test with virtio 1.0 vrctor_rx path
3. On VM, bind virtio net to igb_uio and run testpmd without tx-offloads, [0000:xx.00] is [Bus,Device,Function] of virtio-net::
- ./testpmd -c 0x7 -n 3 -w 0000:xx.00,vectorized -- -i \
+ ./testpmd -c 0x7 -n 3 -a 0000:xx.00,vectorized -- -i \
--nb-cores=2 --rxq=2 --txq=2 --txd=1024 --rxd=1024
testpmd>set fwd mac
testpmd>start
diff --git a/test_plans/vm2vm_virtio_pmd_test_plan.rst b/test_plans/vm2vm_virtio_pmd_test_plan.rst
index 8914e7a..7280af9 100644
--- a/test_plans/vm2vm_virtio_pmd_test_plan.rst
+++ b/test_plans/vm2vm_virtio_pmd_test_plan.rst
@@ -48,7 +48,7 @@ Test Case 1: VM2VM vhost-user/virtio-pmd with vector_rx path
1. Bind one physical nic port to igb_uio, then launch the testpmd by below commands::
rm -rf vhost-net*
- ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
testpmd>set fwd mac
testpmd>start
@@ -78,13 +78,13 @@ Test Case 1: VM2VM vhost-user/virtio-pmd with vector_rx path
3. On VM1, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio1, [0000:xx.00] is [Bus,Device,Function] of virtio-net::
- ./testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 -a 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024
testpmd>set fwd rxonly
testpmd>start
4. On VM2, bind vdev with igb_uio driver,then run testpmd, set txonly for virtio2 and send 64B packets, [0000:xx.00] is [Bus,Device,Function] of virtio-net::
- ./testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 -a 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024
testpmd>set fwd txonly
testpmd>set txpkts 64
testpmd>start tx_first 32
@@ -103,7 +103,7 @@ Test Case 2: VM2VM vhost-user/virtio-pmd with normal path
1. Bind one physical nic port to igb_uio, then launch the testpmd by below commands::
rm -rf vhost-net*
- ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
testpmd>set fwd mac
testpmd>start
@@ -133,13 +133,13 @@ Test Case 2: VM2VM vhost-user/virtio-pmd with normal path
3. On VM1, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio1 ::
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
testpmd>set fwd rxonly
testpmd>start
4. On VM2, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio2 and send 64B packets ::
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
testpmd>set fwd txonly
testpmd>set txpkts 64
testpmd>start tx_first 32
@@ -158,7 +158,7 @@ Test Case 3: VM2VM vhost-user/virtio1.0-pmd with vector_rx path
1. Bind one physical nic port to igb_uio, then launch the testpmd by below commands::
rm -rf vhost-net*
- ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
testpmd>set fwd mac
testpmd>start
@@ -188,13 +188,13 @@ Test Case 3: VM2VM vhost-user/virtio1.0-pmd with vector_rx path
3. On VM1, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio1, [0000:xx.00] is [Bus,Device,Function] of virtio-net::
- ./testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 -a 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024
testpmd>set fwd rxonly
testpmd>start
4. On VM2, bind vdev with igb_uio driver,then run testpmd, set txonly for virtio2, [0000:xx.00] is [Bus,Device,Function] of virtio-net::
- ./testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 -a 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024
testpmd>set fwd txonly
testpmd>set txpkts 64
testpmd>start tx_first 32
@@ -213,7 +213,7 @@ Test Case 4: VM2VM vhost-user/virtio1.0-pmd with normal path
1. Bind one physical nic port to igb_uio, then launch the testpmd by below commands::
rm -rf vhost-net*
- ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
testpmd>set fwd mac
testpmd>start
@@ -243,13 +243,13 @@ Test Case 4: VM2VM vhost-user/virtio1.0-pmd with normal path
3. On VM1, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio1 ::
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
testpmd>set fwd rxonly
testpmd>start
4. On VM2, bind vdev with igb_uio driver,then run testpmd, set txonly for virtio2 ::
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
testpmd>set fwd txonly
testpmd>set txpkts 64
testpmd>start tx_first 32
@@ -267,7 +267,7 @@ Test Case 5: VM2VM vhost-user/virtio-pmd mergeable path with payload valid check
1. Bind virtio with igb_uio driver, launch the testpmd by below commands::
- ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
testpmd>set fwd mac
testpmd>start
@@ -310,7 +310,7 @@ Test Case 5: VM2VM vhost-user/virtio-pmd mergeable path with payload valid check
4. Bind virtio with igb_uio driver,then run testpmd, set rxonly mode for virtio-pmd on VM1::
- ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set fwd rxonly
testpmd>start
@@ -320,7 +320,7 @@ Test Case 5: VM2VM vhost-user/virtio-pmd mergeable path with payload valid check
6. On VM2, bind virtio with igb_uio driver,then run testpmd, config tx_packets to 8k length with chain mode::
- ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set fwd mac
testpmd>set txpkts 2000,2000,2000,2000
@@ -333,7 +333,7 @@ Test Case 5: VM2VM vhost-user/virtio-pmd mergeable path with payload valid check
9. Relaunch testpmd in VM1::
- ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024
testpmd>set fwd rxonly
testpmd>start
@@ -343,7 +343,7 @@ Test Case 5: VM2VM vhost-user/virtio-pmd mergeable path with payload valid check
11. Relaunch testpmd on VM2, send ten 64B packets from virtio-pmd on VM2::
- ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024
testpmd>set fwd mac
testpmd>set burst 1
testpmd>start tx_first 10
@@ -355,7 +355,7 @@ Test Case 6: VM2VM vhost-user/virtio1.0-pmd mergeable path with payload valid ch
1. Bind virtio with igb_uio driver, launch the testpmd by below commands::
- ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
testpmd>set fwd mac
testpmd>start
@@ -398,7 +398,7 @@ Test Case 6: VM2VM vhost-user/virtio1.0-pmd mergeable path with payload valid ch
4. Bind virtio with igb_uio driver,then run testpmd, set rxonly mode for virtio-pmd on VM1::
- ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set fwd rxonly
testpmd>start
@@ -408,7 +408,7 @@ Test Case 6: VM2VM vhost-user/virtio1.0-pmd mergeable path with payload valid ch
6. On VM2, bind virtio with igb_uio driver,then run testpmd, config tx_packets to 8k length with chain mode::
- ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set fwd mac
testpmd>set txpkts 2000,2000,2000,2000
@@ -421,7 +421,7 @@ Test Case 6: VM2VM vhost-user/virtio1.0-pmd mergeable path with payload valid ch
9. Relaunch testpmd in VM1::
- ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024
testpmd>set fwd rxonly
testpmd>start
@@ -431,7 +431,7 @@ Test Case 6: VM2VM vhost-user/virtio1.0-pmd mergeable path with payload valid ch
11. Relaunch testpmd On VM2, send ten 64B packets from virtio-pmd on VM2::
- ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set fwd mac
testpmd>set burst 1
testpmd>start tx_first 10
@@ -443,7 +443,7 @@ Test Case 7: VM2VM vhost-user/virtio1.1-pmd mergeable path with payload valid ch
1. Bind virtio with igb_uio driver, launch the testpmd by below commands::
- ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
testpmd>set fwd mac
testpmd>start
@@ -486,7 +486,7 @@ Test Case 7: VM2VM vhost-user/virtio1.1-pmd mergeable path with payload valid ch
4. Bind virtio with igb_uio driver,then run testpmd, set rxonly mode for virtio-pmd on VM1::
- ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set fwd rxonly
testpmd>start
@@ -496,7 +496,7 @@ Test Case 7: VM2VM vhost-user/virtio1.1-pmd mergeable path with payload valid ch
6. On VM2, bind virtio with igb_uio driver,then run testpmd, config tx_packets to 8k length with chain mode::
- ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set fwd mac
testpmd>set txpkts 2000,2000,2000,2000
@@ -509,7 +509,7 @@ Test Case 7: VM2VM vhost-user/virtio1.1-pmd mergeable path with payload valid ch
9. Relaunch testpmd in VM1::
- ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024
testpmd>set fwd rxonly
testpmd>start
@@ -519,7 +519,7 @@ Test Case 7: VM2VM vhost-user/virtio1.1-pmd mergeable path with payload valid ch
11. Relaunch testpmd On VM2, send ten 64B packets from virtio-pmd on VM2::
- ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set fwd mac
testpmd>set burst 1
testpmd>start tx_first 10
@@ -532,7 +532,7 @@ Test Case 8: VM2VM vhost-user/virtio1.1-pmd with normal path
1. Bind one physical nic port to igb_uio, then launch the testpmd by below commands::
rm -rf vhost-net*
- ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1' -- -i --nb-cores=1 --txd=1024 --rxd=1024
testpmd>set fwd mac
testpmd>start
@@ -562,13 +562,13 @@ Test Case 8: VM2VM vhost-user/virtio1.1-pmd with normal path
3. On VM1, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio1 ::
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
testpmd>set fwd rxonly
testpmd>start
4. On VM2, bind vdev with igb_uio driver,then run testpmd, set txonly for virtio2 ::
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024
testpmd>set fwd txonly
testpmd>set txpkts 64
testpmd>start tx_first 32
@@ -630,7 +630,7 @@ Test Case 9: VM2VM virtio-pmd split ring mergeable path 8 queues CBDMA enable wi
5. Launch testpmd in VM2, sent imix pkts from VM2::
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set mac fwd
testpmd>set txpkts 64,256,512,1024,2000,64,256,512,1024,2000
testpmd>start tx_first 1
@@ -698,13 +698,13 @@ Test Case 10: VM2VM virtio-pmd split ring mergeable path dynamic queue size CBDM
4. Launch testpmd in VM1::
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set mac fwd
testpmd>start
5. Launch testpmd in VM2 and send imix pkts, check imix packets can looped between two VMs for 1 mins and 4 queues (queue0 to queue3) have packets rx/tx::
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set mac fwd
testpmd>set txpkts 64,256,512,1024,2000,64,256,512,1024,2000
testpmd>start tx_first 32
@@ -770,13 +770,13 @@ Test Case 11: VM2VM virtio-pmd packed ring mergeable path 8 queues CBDMA enable
4. Launch testpmd in VM1::
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set mac fwd
testpmd>start
5. Launch testpmd in VM2 and send imix pkts, check imix packets can looped between two VMs for 1 mins and 8 queues all have packets rx/tx::
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set mac fwd
testpmd>set txpkts 64,256,512,1024,20000,64,256,512,1024,20000
testpmd>start tx_first 32
@@ -802,7 +802,7 @@ Test Case 11: VM2VM virtio-pmd packed ring mergeable path 8 queues CBDMA enable
modprobe vfio-pci
echo 1 > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
./usertools/dpdk-devbind.py --force --bind=vfio-pci 0000:00:05.0
- ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
+ ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600
testpmd>set mac fwd
testpmd>set txpkts 64,256,512,1024,20000,64,256,512,1024,20000
testpmd>start tx_first 32
--
1.8.3.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* [dts] [PATCH V1 3/3] conf/*: changed eal -w parameter to -a
2021-10-13 9:24 [dts] [PATCH V1 1/3] tests/*: changed eal -w parameter to -a Jun Dong
2021-10-13 9:24 ` [dts] [PATCH V1 2/3] test_plans/*: " Jun Dong
@ 2021-10-13 9:24 ` Jun Dong
2021-10-18 4:46 ` Tu, Lijuan
1 sibling, 1 reply; 4+ messages in thread
From: Jun Dong @ 2021-10-13 9:24 UTC (permalink / raw)
To: dts; +Cc: PingX.Yu, weix.ling, junx.dong
- changed eal parameter -w to -a for some config files
Signed-off-by: Jun Dong <junx.dong@intel.com>
---
conf/compressdev_sample.cfg | 2 +-
conf/pktgen.cfg | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/conf/compressdev_sample.cfg b/conf/compressdev_sample.cfg
index f2841a3..b286121 100644
--- a/conf/compressdev_sample.cfg
+++ b/conf/compressdev_sample.cfg
@@ -13,4 +13,4 @@ input-file = "/root/calgary"
#[test_qat_pmd_dynamic_func]
#l="0-6"
-#w="1a:01.0 -w 1c:01.0 -w 1e:01.0"
+#w="1a:01.0 -a 1c:01.0 -a 1e:01.0"
diff --git a/conf/pktgen.cfg b/conf/pktgen.cfg
index 17703eb..3176f49 100644
--- a/conf/pktgen.cfg
+++ b/conf/pktgen.cfg
@@ -15,7 +15,7 @@
# num -n: Number of memory channels
# proc_type --proc-type: Type of this process
# pci_blocklist --pci-blocklist, -b: Add a PCI device in block list.
-# pci_allowlist --pci-allowlist, -w: Add a PCI device in allow list.
+# pci_allowlist --pci-allowlist, -a: Add a PCI device in allow list.
# file_prefix --file-prefix: Prefix for hugepage filenames
# socket_memory --socket-mem: Memory to allocate on specific sockets
# mapping_ports -m: Matrix for mapping ports to logical cores.
--
1.8.3.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [dts] [PATCH V1 3/3] conf/*: changed eal -w parameter to -a
2021-10-13 9:24 ` [dts] [PATCH V1 3/3] conf/*: " Jun Dong
@ 2021-10-18 4:46 ` Tu, Lijuan
0 siblings, 0 replies; 4+ messages in thread
From: Tu, Lijuan @ 2021-10-18 4:46 UTC (permalink / raw)
To: Dong, JunX, dts; +Cc: Yu, PingX, Ling, WeiX, Dong, JunX
> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Jun Dong
> Sent: 2021年10月13日 17:25
> To: dts@dpdk.org
> Cc: Yu, PingX <pingx.yu@intel.com>; Ling, WeiX <weix.ling@intel.com>; Dong,
> JunX <junx.dong@intel.com>
> Subject: [dts] [PATCH V1 3/3] conf/*: changed eal -w parameter to -a
>
> - changed eal parameter -w to -a for some config files
>
> Signed-off-by: Jun Dong <junx.dong@intel.com>
Applied
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2021-10-18 4:46 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-10-13 9:24 [dts] [PATCH V1 1/3] tests/*: changed eal -w parameter to -a Jun Dong
2021-10-13 9:24 ` [dts] [PATCH V1 2/3] test_plans/*: " Jun Dong
2021-10-13 9:24 ` [dts] [PATCH V1 3/3] conf/*: " Jun Dong
2021-10-18 4:46 ` Tu, Lijuan
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).