* [dpdk-dev] [PATCH 1/4] testpmd: add command to start/stop specific queue
2014-08-14 7:34 [dpdk-dev] [PATCH 0/4] RX/TX queue start/stop enhancement Chen Jing D(Mark)
@ 2014-08-14 7:35 ` Chen Jing D(Mark)
2014-08-14 7:35 ` [dpdk-dev] [PATCH 2/4] i40e: PF Add support for per-queue start/stop Chen Jing D(Mark)
` (3 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Chen Jing D(Mark) @ 2014-08-14 7:35 UTC (permalink / raw)
To: dev
From: "Chen Jing D(Mark)" <jing.d.chen@intel.com>
The rte_ether library provides function pointers to start/stop a specific
RX/TX queue, and NIC drivers implement those functions. This change
adds commands in testpmd to start/stop a specific RX/TX queue.
Signed-off-by: Chen Jing D(Mark) <jing.d.chen@intel.com>
Reviewed-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Reviewed-by: Changchun Ouyang <changchun.ouyang@intel.com>
Reviewed-by: Huawei Xie <huawei.xie@intel.com>
---
app/test-pmd/cmdline.c | 95 ++++++++++++++++++++++++++++++++++++++++++++++++
app/test-pmd/config.c | 6 ++--
app/test-pmd/testpmd.c | 12 ++++++
app/test-pmd/testpmd.h | 4 ++
4 files changed, 114 insertions(+), 3 deletions(-)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 345be11..ddf5def 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -565,6 +565,10 @@ static void cmd_help_long_parsed(void *parsed_result,
" tx rs bit threshold.\n\n"
"port config mtu X value\n"
" Set the MTU of port X to a given value\n\n"
+
+ "port (port_id) (rxq|txq) (queue_id) (start|stop)\n"
+ " Start/stop a rx/tx queue of port X. Only take effect"
+ " when port X is started\n"
);
}
@@ -1425,6 +1429,96 @@ cmdline_parse_inst_t cmd_config_rss_hash_key = {
},
};
+/* *** configure port rxq/txq start/stop *** */
+struct cmd_config_rxtx_queue {
+ cmdline_fixed_string_t port;
+ uint8_t portid;
+ cmdline_fixed_string_t rxtxq;
+ uint16_t qid;
+ cmdline_fixed_string_t opname;
+};
+
+static void
+cmd_config_rxtx_queue_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_rxtx_queue *res = parsed_result;
+ uint8_t isrx;
+ uint8_t isstart;
+
+ if (test_done == 0) {
+ printf("Please stop forwarding first\n");
+ return;
+ }
+
+ if (port_id_is_invalid(res->portid))
+ return;
+
+ if (port_is_started(res->portid) != 1) {
+ printf("Please start port %u first\n", res->portid);
+ return;
+ }
+
+ if (!strcmp(res->rxtxq, "rxq"))
+ isrx = 1;
+ else if (!strcmp(res->rxtxq, "txq"))
+ isrx = 0;
+ else {
+ printf("Unknown parameter\n");
+ return;
+ }
+
+ if (isrx && rx_queue_id_is_invalid(res->qid))
+ return;
+ else if (!isrx && tx_queue_id_is_invalid(res->qid))
+ return;
+
+ if (!strcmp(res->opname, "start"))
+ isstart = 1;
+ else if (!strcmp(res->opname, "stop"))
+ isstart = 0;
+ else {
+ printf("Unknown parameter\n");
+ return;
+ }
+
+ if (isstart && isrx)
+ rte_eth_dev_rx_queue_start(res->portid, res->qid);
+ else if (!isstart && isrx)
+ rte_eth_dev_rx_queue_stop(res->portid, res->qid);
+ else if (isstart && !isrx)
+ rte_eth_dev_tx_queue_start(res->portid, res->qid);
+ else
+ rte_eth_dev_tx_queue_stop(res->portid, res->qid);
+}
+
+cmdline_parse_token_string_t cmd_config_rxtx_queue_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_queue, port, "port");
+cmdline_parse_token_num_t cmd_config_rxtx_queue_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_queue, portid, UINT8);
+cmdline_parse_token_string_t cmd_config_rxtx_queue_rxtxq =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_queue, rxtxq, "rxq#txq");
+cmdline_parse_token_num_t cmd_config_rxtx_queue_qid =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_queue, qid, UINT16);
+cmdline_parse_token_string_t cmd_config_rxtx_queue_opname =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_queue, opname,
+ "start#stop");
+
+cmdline_parse_inst_t cmd_config_rxtx_queue = {
+ .f = cmd_config_rxtx_queue_parsed,
+ .data = NULL,
+ .help_str = "port X rxq|txq ID start|stop",
+ .tokens = {
+ (void *)&cmd_config_speed_all_port,
+ (void *)&cmd_config_rxtx_queue_portid,
+ (void *)&cmd_config_rxtx_queue_rxtxq,
+ (void *)&cmd_config_rxtx_queue_qid,
+ (void *)&cmd_config_rxtx_queue_opname,
+ NULL,
+ },
+};
+
/* *** Configure RSS RETA *** */
struct cmd_config_rss_reta {
cmdline_fixed_string_t port;
@@ -7393,6 +7487,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_config_max_pkt_len,
(cmdline_parse_inst_t *)&cmd_config_rx_mode_flag,
(cmdline_parse_inst_t *)&cmd_config_rss,
+ (cmdline_parse_inst_t *)&cmd_config_rxtx_queue,
(cmdline_parse_inst_t *)&cmd_config_rss_reta,
(cmdline_parse_inst_t *)&cmd_showport_reta,
(cmdline_parse_inst_t *)&cmd_config_burst,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index c72f6ee..606e34a 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -311,7 +311,7 @@ port_infos_display(portid_t port_id)
}
}
-static int
+int
port_id_is_invalid(portid_t port_id)
{
if (port_id < nb_ports)
@@ -521,7 +521,7 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
/*
* RX/TX ring descriptors display functions.
*/
-static int
+int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
if (rxq_id < nb_rxq)
@@ -530,7 +530,7 @@ rx_queue_id_is_invalid(queueid_t rxq_id)
return 1;
}
-static int
+int
tx_queue_id_is_invalid(queueid_t txq_id)
{
if (txq_id < nb_txq)
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index e8a4b45..a112559 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -1513,6 +1513,18 @@ all_ports_stopped(void)
return 1;
}
+int
+port_is_started(portid_t port_id)
+{
+ if (port_id_is_invalid(port_id))
+ return -1;
+
+ if (ports[port_id].port_status != RTE_PORT_STARTED)
+ return 0;
+
+ return 1;
+}
+
void
pmd_test_exit(void)
{
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index ac86bfe..b8322a2 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -511,6 +511,7 @@ int start_port(portid_t pid);
void stop_port(portid_t pid);
void close_port(portid_t pid);
int all_ports_stopped(void);
+int port_is_started(portid_t port_id);
void pmd_test_exit(void);
void fdir_add_signature_filter(portid_t port_id, uint8_t queue_id,
@@ -547,6 +548,9 @@ void get_ethertype_filter(uint8_t port_id, uint16_t index);
void get_2tuple_filter(uint8_t port_id, uint16_t index);
void get_5tuple_filter(uint8_t port_id, uint16_t index);
void get_flex_filter(uint8_t port_id, uint16_t index);
+int port_id_is_invalid(portid_t port_id);
+int rx_queue_id_is_invalid(queueid_t rxq_id);
+int tx_queue_id_is_invalid(queueid_t txq_id);
/*
* Work-around of a compilation error with ICC on invocations of the
--
1.7.7.6
^ permalink raw reply [flat|nested] 7+ messages in thread
* [dpdk-dev] [PATCH 2/4] i40e: PF Add support for per-queue start/stop
2014-08-14 7:34 [dpdk-dev] [PATCH 0/4] RX/TX queue start/stop enhancement Chen Jing D(Mark)
2014-08-14 7:35 ` [dpdk-dev] [PATCH 1/4] testpmd: add command to start/stop specific queue Chen Jing D(Mark)
@ 2014-08-14 7:35 ` Chen Jing D(Mark)
2014-08-14 7:38 ` Cao, Min
2014-08-14 7:35 ` [dpdk-dev] [PATCH 3/4] i40e: PF driver to support RX/TX config parameter Chen Jing D(Mark)
` (2 subsequent siblings)
4 siblings, 1 reply; 7+ messages in thread
From: Chen Jing D(Mark) @ 2014-08-14 7:35 UTC (permalink / raw)
To: dev
From: "Chen Jing D(Mark)" <jing.d.chen@intel.com>
The i40e driver adds function pointers to start/stop a specific RX/TX queue.
Signed-off-by: Chen Jing D(Mark) <jing.d.chen@intel.com>
Reviewed-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Reviewed-by: Changchun Ouyang <changchun.ouyang@intel.com>
Reviewed-by: Huawei Xie <huawei.xie@intel.com>
---
lib/librte_pmd_i40e/i40e_ethdev.c | 4 +
lib/librte_pmd_i40e/i40e_rxtx.c | 112 +++++++++++++++++++++++++++++++++++++
lib/librte_pmd_i40e/i40e_rxtx.h | 4 +
3 files changed, 120 insertions(+), 0 deletions(-)
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index 9ed31b5..81a1deb 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -232,6 +232,10 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
.vlan_offload_set = i40e_vlan_offload_set,
.vlan_strip_queue_set = i40e_vlan_strip_queue_set,
.vlan_pvid_set = i40e_vlan_pvid_set,
+ .rx_queue_start = i40e_dev_rx_queue_start,
+ .rx_queue_stop = i40e_dev_rx_queue_stop,
+ .tx_queue_start = i40e_dev_tx_queue_start,
+ .tx_queue_stop = i40e_dev_tx_queue_stop,
.rx_queue_setup = i40e_dev_rx_queue_setup,
.rx_queue_release = i40e_dev_rx_queue_release,
.rx_queue_count = i40e_dev_rx_queue_count,
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 83b9462..323c004 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -1429,6 +1429,118 @@ i40e_xmit_pkts_simple(void *tx_queue,
}
int
+i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+ struct i40e_rx_queue *rxq;
+ int err = -1;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t q_base = vsi->base_queue;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf\n");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, TRUE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
+ rx_queue_id);
+
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ }
+ }
+
+ return err;
+}
+
+int
+i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+ struct i40e_rx_queue *rxq;
+ int err;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t q_base = vsi->base_queue;
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
+ rx_queue_id);
+ return err;
+ }
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ }
+
+ return 0;
+}
+
+int
+i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+ int err = -1;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t q_base = vsi->base_queue;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, TRUE);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
+ tx_queue_id);
+ }
+
+ return err;
+}
+
+int
+i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+ struct i40e_tx_queue *txq;
+ int err;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t q_base = vsi->base_queue;
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of\n",
+ tx_queue_id);
+ return err;
+ }
+
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ }
+
+ return 0;
+}
+
+int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.h b/lib/librte_pmd_i40e/i40e_rxtx.h
index 6db2faf..b67b4b3 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.h
+++ b/lib/librte_pmd_i40e/i40e_rxtx.h
@@ -152,6 +152,10 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
};
+int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
--
1.7.7.6
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [dpdk-dev] [PATCH 2/4] i40e: PF Add support for per-queue start/stop
2014-08-14 7:35 ` [dpdk-dev] [PATCH 2/4] i40e: PF Add support for per-queue start/stop Chen Jing D(Mark)
@ 2014-08-14 7:38 ` Cao, Min
0 siblings, 0 replies; 7+ messages in thread
From: Cao, Min @ 2014-08-14 7:38 UTC (permalink / raw)
To: Chen, Jing D, dev
I will verify this next week. :)
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Chen Jing D(Mark)
Sent: Thursday, August 14, 2014 3:35 PM
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 2/4] i40e: PF Add support for per-queue start/stop
From: "Chen Jing D(Mark)" <jing.d.chen@intel.com>
The i40e driver adds function pointers to start/stop a specific RX/TX queue.
Signed-off-by: Chen Jing D(Mark) <jing.d.chen@intel.com>
Reviewed-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Reviewed-by: Changchun Ouyang <changchun.ouyang@intel.com>
Reviewed-by: Huawei Xie <huawei.xie@intel.com>
---
lib/librte_pmd_i40e/i40e_ethdev.c | 4 +
lib/librte_pmd_i40e/i40e_rxtx.c | 112 +++++++++++++++++++++++++++++++++++++
lib/librte_pmd_i40e/i40e_rxtx.h | 4 +
3 files changed, 120 insertions(+), 0 deletions(-)
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index 9ed31b5..81a1deb 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -232,6 +232,10 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
.vlan_offload_set = i40e_vlan_offload_set,
.vlan_strip_queue_set = i40e_vlan_strip_queue_set,
.vlan_pvid_set = i40e_vlan_pvid_set,
+ .rx_queue_start = i40e_dev_rx_queue_start,
+ .rx_queue_stop = i40e_dev_rx_queue_stop,
+ .tx_queue_start = i40e_dev_tx_queue_start,
+ .tx_queue_stop = i40e_dev_tx_queue_stop,
.rx_queue_setup = i40e_dev_rx_queue_setup,
.rx_queue_release = i40e_dev_rx_queue_release,
.rx_queue_count = i40e_dev_rx_queue_count,
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 83b9462..323c004 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -1429,6 +1429,118 @@ i40e_xmit_pkts_simple(void *tx_queue,
}
int
+i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+ struct i40e_rx_queue *rxq;
+ int err = -1;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t q_base = vsi->base_queue;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf\n");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, TRUE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
+ rx_queue_id);
+
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ }
+ }
+
+ return err;
+}
+
+int
+i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+ struct i40e_rx_queue *rxq;
+ int err;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t q_base = vsi->base_queue;
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
+ rx_queue_id);
+ return err;
+ }
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ }
+
+ return 0;
+}
+
+int
+i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+ int err = -1;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t q_base = vsi->base_queue;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, TRUE);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
+ tx_queue_id);
+ }
+
+ return err;
+}
+
+int
+i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+ struct i40e_tx_queue *txq;
+ int err;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t q_base = vsi->base_queue;
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of\n",
+ tx_queue_id);
+ return err;
+ }
+
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ }
+
+ return 0;
+}
+
+int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.h b/lib/librte_pmd_i40e/i40e_rxtx.h
index 6db2faf..b67b4b3 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.h
+++ b/lib/librte_pmd_i40e/i40e_rxtx.h
@@ -152,6 +152,10 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
};
+int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
--
1.7.7.6
^ permalink raw reply [flat|nested] 7+ messages in thread
* [dpdk-dev] [PATCH 3/4] i40e: PF driver to support RX/TX config parameter
2014-08-14 7:34 [dpdk-dev] [PATCH 0/4] RX/TX queue start/stop enhancement Chen Jing D(Mark)
2014-08-14 7:35 ` [dpdk-dev] [PATCH 1/4] testpmd: add command to start/stop specific queue Chen Jing D(Mark)
2014-08-14 7:35 ` [dpdk-dev] [PATCH 2/4] i40e: PF Add support for per-queue start/stop Chen Jing D(Mark)
@ 2014-08-14 7:35 ` Chen Jing D(Mark)
2014-08-14 7:35 ` [dpdk-dev] [PATCH 4/4] i40e: VF driver to support per-queue RX/TX start/stop Chen Jing D(Mark)
2014-08-25 14:39 ` [dpdk-dev] [PATCH 0/4] RX/TX queue start/stop enhancement Thomas Monjalon
4 siblings, 0 replies; 7+ messages in thread
From: Chen Jing D(Mark) @ 2014-08-14 7:35 UTC (permalink / raw)
To: dev
From: "Chen Jing D(Mark)" <jing.d.chen@intel.com>
PF driver to support the start_rx_per_q field in i40e_dev_rx_queue_setup
and start_tx_per_q in i40e_dev_tx_queue_setup. Meanwhile, change the
dev_start/stop functions to call the per-queue RX/TX functions.
Signed-off-by: Chen Jing D(Mark) <jing.d.chen@intel.com>
Reviewed-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Reviewed-by: Changchun Ouyang <changchun.ouyang@intel.com>
Reviewed-by: Huawei Xie <huawei.xie@intel.com>
---
lib/librte_pmd_i40e/i40e_ethdev.c | 46 +++++++++++++++++++++---------------
lib/librte_pmd_i40e/i40e_rxtx.c | 21 +++++-----------
lib/librte_pmd_i40e/i40e_rxtx.h | 5 ++++
3 files changed, 39 insertions(+), 33 deletions(-)
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index 81a1deb..d5dd6e3 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -690,7 +690,6 @@ i40e_dev_start(struct rte_eth_dev *dev)
err_up:
i40e_vsi_switch_queues(vsi, FALSE);
- i40e_dev_clear_queues(dev);
return ret;
}
@@ -704,9 +703,6 @@ i40e_dev_stop(struct rte_eth_dev *dev)
/* Disable all queues */
i40e_vsi_switch_queues(vsi, FALSE);
- /* Clear all queues and release memory */
- i40e_dev_clear_queues(dev);
-
/* un-map queues with interrupt registers */
i40e_vsi_disable_queues_intr(vsi);
i40e_vsi_queues_unbind_intr(vsi);
@@ -2845,17 +2841,23 @@ static int
i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
{
struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
- struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
struct i40e_tx_queue *txq;
- uint16_t i, pf_q;
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
+ uint16_t i;
int ret;
- pf_q = vsi->base_queue;
- for (i = 0; i < dev_data->nb_tx_queues; i++, pf_q++) {
+ for (i = 0; i < dev_data->nb_tx_queues; i++) {
txq = dev_data->tx_queues[i];
- if (!txq->q_set)
- continue; /* Queue not configured */
- ret = i40e_switch_tx_queue(hw, pf_q, on);
+ /**
+ * Don't operate the queue in two cases: the queue was never
+ * configured, or this is a start operation and start_tx_per_q is set.
+ **/
+ if (!txq->q_set || (on && txq->start_tx_per_q))
+ continue;
+ if (on)
+ ret = i40e_dev_tx_queue_start(dev, i);
+ else
+ ret = i40e_dev_tx_queue_stop(dev, i);
if ( ret != I40E_SUCCESS)
return ret;
}
@@ -2919,18 +2921,24 @@ static int
i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
{
struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
- struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
struct i40e_rx_queue *rxq;
- uint16_t i, pf_q;
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
+ uint16_t i;
int ret;
- pf_q = vsi->base_queue;
- for (i = 0; i < dev_data->nb_rx_queues; i++, pf_q++) {
+ for (i = 0; i < dev_data->nb_rx_queues; i++) {
rxq = dev_data->rx_queues[i];
- if (!rxq->q_set)
- continue; /* Queue not configured */
- ret = i40e_switch_rx_queue(hw, pf_q, on);
- if ( ret != I40E_SUCCESS)
+ /**
+ * Don't operate the queue in two cases: the queue was never
+ * configured, or this is a start operation and start_rx_per_q is set.
+ **/
+ if (!rxq->q_set || (on && rxq->start_rx_per_q))
+ continue;
+ if (on)
+ ret = i40e_dev_rx_queue_start(dev, i);
+ else
+ ret = i40e_dev_rx_queue_stop(dev, i);
+ if (ret != I40E_SUCCESS)
return ret;
}
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 323c004..f153844 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -88,9 +88,6 @@ i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
uint16_t queue_id,
uint32_t ring_size,
int socket_id);
-static void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
-static void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
-static void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -1594,6 +1591,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
+ rxq->start_rx_per_q = rx_conf->start_rx_per_q;
/* Allocate the maximun number of RX ring hardware descriptor. */
ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
@@ -1879,6 +1877,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
txq->vsi = vsi;
+ txq->start_tx_per_q = tx_conf->start_tx_per_q;
#ifdef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
@@ -1986,7 +1985,7 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
}
-static void
+void
i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
{
unsigned i;
@@ -2017,7 +2016,7 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
rxq->pkt_last_seg = NULL;
}
-static void
+void
i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
{
uint16_t i;
@@ -2035,7 +2034,7 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
}
}
-static void
+void
i40e_reset_tx_queue(struct i40e_tx_queue *txq)
{
struct i40e_tx_entry *txe;
@@ -2273,7 +2272,7 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
}
rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
- err = i40e_alloc_rx_queue_mbufs(rxq);
+
mbp_priv = rte_mempool_get_priv(rxq->mp);
buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
RTE_PKTMBUF_HEADROOM);
@@ -2284,16 +2283,10 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
dev->rx_pkt_burst = i40e_recv_scattered_pkts;
}
- rte_wmb();
-
/* Init the RX tail register. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf\n");
-
- return err;
+ return 0;
}
void
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.h b/lib/librte_pmd_i40e/i40e_rxtx.h
index b67b4b3..687e23c 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.h
+++ b/lib/librte_pmd_i40e/i40e_rxtx.h
@@ -112,6 +112,7 @@ struct i40e_rx_queue {
uint16_t max_pkt_len; /* Maximum packet length */
uint8_t hs_mode; /* Header Split mode */
bool q_set; /**< indicate if rx queue has been configured */
+ bool start_rx_per_q; /**< don't start this queue in dev start */
};
struct i40e_tx_entry {
@@ -150,6 +151,7 @@ struct i40e_tx_queue {
uint16_t tx_next_dd;
uint16_t tx_next_rs;
bool q_set; /**< indicate if tx queue has been configured */
+ bool start_tx_per_q; /**< don't Start this queue in dev start */
};
int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
@@ -183,6 +185,9 @@ int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
void i40e_free_tx_resources(struct i40e_tx_queue *txq);
void i40e_free_rx_resources(struct i40e_rx_queue *rxq);
void i40e_dev_clear_queues(struct rte_eth_dev *dev);
+void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
+void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
+void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
--
1.7.7.6
^ permalink raw reply [flat|nested] 7+ messages in thread
* [dpdk-dev] [PATCH 4/4] i40e: VF driver to support per-queue RX/TX start/stop
2014-08-14 7:34 [dpdk-dev] [PATCH 0/4] RX/TX queue start/stop enhancement Chen Jing D(Mark)
` (2 preceding siblings ...)
2014-08-14 7:35 ` [dpdk-dev] [PATCH 3/4] i40e: PF driver to support RX/TX config parameter Chen Jing D(Mark)
@ 2014-08-14 7:35 ` Chen Jing D(Mark)
2014-08-25 14:39 ` [dpdk-dev] [PATCH 0/4] RX/TX queue start/stop enhancement Thomas Monjalon
4 siblings, 0 replies; 7+ messages in thread
From: Chen Jing D(Mark) @ 2014-08-14 7:35 UTC (permalink / raw)
To: dev
From: "Chen Jing D(Mark)" <jing.d.chen@intel.com>
VF driver to add per-queue RX/TX start/stop functions. Support the
start_rx_per_q field in rx_queue_setup and start_tx_per_q in
tx_queue_setup. Meanwhile, change the dev_start/stop functions
to call the per-queue RX/TX functions.
Signed-off-by: Chen Jing D(Mark) <jing.d.chen@intel.com>
Reviewed-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Reviewed-by: Changchun Ouyang <changchun.ouyang@intel.com>
Reviewed-by: Huawei Xie <huawei.xie@intel.com>
---
lib/librte_pmd_i40e/i40e_ethdev_vf.c | 232 ++++++++++++++++++++++++++--------
1 files changed, 180 insertions(+), 52 deletions(-)
diff --git a/lib/librte_pmd_i40e/i40e_ethdev_vf.c b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
index 2726bfb..a9a5633 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev_vf.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
@@ -125,6 +125,10 @@ static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40evf_get_link_status(struct rte_eth_dev *dev,
struct rte_eth_link *link);
static int i40evf_init_vlan(struct rte_eth_dev *dev);
+static int i40evf_dev_rx_queue_start(struct rte_eth_dev *, uint16_t);
+static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *, uint16_t);
+static int i40evf_dev_tx_queue_start(struct rte_eth_dev *, uint16_t);
+static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *, uint16_t);
static struct eth_dev_ops i40evf_eth_dev_ops = {
.dev_configure = i40evf_dev_configure,
.dev_start = i40evf_dev_start,
@@ -140,6 +144,10 @@ static struct eth_dev_ops i40evf_eth_dev_ops = {
.vlan_filter_set = i40evf_vlan_filter_set,
.vlan_offload_set = i40evf_vlan_offload_set,
.vlan_pvid_set = i40evf_vlan_pvid_set,
+ .rx_queue_start = i40evf_dev_rx_queue_start,
+ .rx_queue_stop = i40evf_dev_rx_queue_stop,
+ .tx_queue_start = i40evf_dev_tx_queue_start,
+ .tx_queue_stop = i40evf_dev_tx_queue_stop,
.rx_queue_setup = i40e_dev_rx_queue_setup,
.rx_queue_release = i40e_dev_rx_queue_release,
.tx_queue_setup = i40e_dev_tx_queue_setup,
@@ -628,65 +636,94 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
}
static int
-i40evf_enable_queues(struct rte_eth_dev *dev)
+i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
+ bool on)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_virtchnl_queue_select queue_select;
- int err, i;
+ int err;
struct vf_cmd_info args;
-
+ memset(&queue_select, 0, sizeof(queue_select));
queue_select.vsi_id = vf->vsi_res->vsi_id;
- queue_select.rx_queues = 0;
- /* Enable configured RX queues */
- for (i = 0; i < dev->data->nb_rx_queues; i++)
- queue_select.rx_queues |= 1 << i;
-
- /* Enable configured TX queues */
- queue_select.tx_queues = 0;
- for (i = 0; i < dev->data->nb_tx_queues; i++)
- queue_select.tx_queues |= 1 << i;
+ if (isrx)
+ queue_select.rx_queues |= 1 << qid;
+ else
+ queue_select.tx_queues |= 1 << qid;
- args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ if (on)
+ args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ else
+ args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
args.in_args = (u8 *)&queue_select;
args.in_args_size = sizeof(queue_select);
args.out_buffer = cmd_result_buffer;
args.out_size = I40E_AQ_BUF_SZ;
err = i40evf_execute_vf_cmd(dev, &args);
if (err)
- PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES\n");
+ PMD_DRV_LOG(ERR, "fail to switch %s %u %s\n", isrx ? "RX" : "TX",
+ qid, on ? "on" : "off");
return err;
}
static int
-i40evf_disable_queues(struct rte_eth_dev *dev)
+i40evf_start_queues(struct rte_eth_dev *dev)
{
- struct i40e_virtchnl_queue_select queue_select;
- int err, i;
- struct vf_cmd_info args;
+ struct rte_eth_dev_data *dev_data = dev->data;
+ int i;
+ struct i40e_rx_queue *rxq;
+ struct i40e_tx_queue *txq;
- /* Enable configured RX queues */
- queue_select.rx_queues = 0;
- for (i = 0; i < dev->data->nb_rx_queues; i++)
- queue_select.rx_queues |= 1 << i;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev_data->rx_queues[i];
+ if (rxq->start_rx_per_q)
+ continue;
+ if (i40evf_dev_rx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
+ i);
+ return -1;
+ }
+ }
- /* Enable configured TX queues */
- queue_select.tx_queues = 0;
- for (i = 0; i < dev->data->nb_tx_queues; i++)
- queue_select.tx_queues |= 1 << i;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev_data->tx_queues[i];
+ if (txq->start_tx_per_q)
+ continue;
+ if (i40evf_dev_tx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
+ i);
+ return -1;
+ }
+ }
- args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
- args.in_args = (u8 *)&queue_select;
- args.in_args_size = sizeof(queue_select);
- args.out_buffer = cmd_result_buffer;
- args.out_size = I40E_AQ_BUF_SZ;
- err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
- PMD_DRV_LOG(ERR, "fail to execute command "
- "OP_DISABLE_QUEUES\n");
+ return 0;
+}
- return err;
+static int
+i40evf_stop_queues(struct rte_eth_dev *dev)
+{
+ int i;
+
+ /* Stop TX queues first */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
+ i);
+ return -1;
+ }
+ }
+
+ /* Then stop RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
+ i);
+ return -1;
+ }
+ }
+
+ return 0;
}
static int
@@ -1179,6 +1216,109 @@ i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
}
static int
+i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err = 0;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf\n");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
+ rx_queue_id);
+ }
+
+ return err;
+}
+
+static int
+i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err;
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
+ rx_queue_id);
+ return err;
+ }
+
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ }
+
+ return 0;
+}
+
+static int
+i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
+ tx_queue_id);
+ }
+
+ return err;
+}
+
+static int
+i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct i40e_tx_queue *txq;
+ int err;
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off\n",
+ tx_queue_id);
+ return err;
+ }
+
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ }
+
+ return 0;
+}
+
+static int
i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
int ret;
@@ -1194,16 +1334,12 @@ i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
static int
i40evf_rx_init(struct rte_eth_dev *dev)
{
- uint16_t i, j;
+ uint16_t i;
struct i40e_rx_queue **rxq =
(struct i40e_rx_queue **)dev->data->rx_queues;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- if (i40e_alloc_rx_queue_mbufs(rxq[i]) != 0) {
- PMD_DRV_LOG(ERR, "alloc rx queues mbufs failed\n");
- goto err;
- }
rxq[i]->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(i);
I40E_PCI_REG_WRITE(rxq[i]->qrx_tail, rxq[i]->nb_rx_desc - 1);
}
@@ -1212,13 +1348,6 @@ i40evf_rx_init(struct rte_eth_dev *dev)
I40EVF_WRITE_FLUSH(hw);
return 0;
-
-err:
- /* Release all mbufs */
- for (j = 0; j < i; j++)
- i40e_rx_queue_release_mbufs(rxq[j]);
-
- return -1;
}
static void
@@ -1307,17 +1436,17 @@ i40evf_dev_start(struct rte_eth_dev *dev)
goto err_queue;
}
- if (i40evf_enable_queues(dev) != 0) {
+ if (i40evf_start_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "enable queues failed\n");
goto err_mac;
}
+
i40evf_enable_queues_intr(hw);
return 0;
err_mac:
i40evf_del_mac_addr(dev, &mac_addr);
err_queue:
- i40e_dev_clear_queues(dev);
return -1;
}
@@ -1329,8 +1458,7 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
i40evf_disable_queues_intr(hw);
- i40evf_disable_queues(dev);
- i40e_dev_clear_queues(dev);
+ i40evf_stop_queues(dev);
}
static int
--
1.7.7.6
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [dpdk-dev] [PATCH 0/4] RX/TX queue start/stop enhancement
2014-08-14 7:34 [dpdk-dev] [PATCH 0/4] RX/TX queue start/stop enhancement Chen Jing D(Mark)
` (3 preceding siblings ...)
2014-08-14 7:35 ` [dpdk-dev] [PATCH 4/4] i40e: VF driver to support per-queue RX/TX start/stop Chen Jing D(Mark)
@ 2014-08-25 14:39 ` Thomas Monjalon
4 siblings, 0 replies; 7+ messages in thread
From: Thomas Monjalon @ 2014-08-25 14:39 UTC (permalink / raw)
To: Chen Jing D(Mark); +Cc: dev
> This patch mainly includes 2 changes. One is in testpmd to add command
> testing specific RX/TX queue start/stop in Port X. Another change is in
> i40e, which implemented rx/tx_queue_start/stop in both PF and VF driver.
> In the meanwhile, support field start_rx_per_q in i40e_dev_rx_queue_setup
> and start_tx_per_q in i40e_dev_tx_queue_setup.
>
> Chen Jing D(Mark) (4):
> testpmd: add command to start/stop specfic queue
> i40e: PF Add support for per-queue start/stop
> i40e: PF driver to support RX/TX config paramter
> i40e: VF driver to support per-queue RX/TX start/stop
Applied for version 1.7.1.
The commit logs and comments in the patches had some typos and sentences
difficult to read. For next patches, I'd prefer simpler sentences.
I'm French so I know how difficult it is to write clear English text and
I think it's even harder for a Chinese speaker.
By the way, I see that Konstantin reviewed the patch, so I assume he checked
the code but he probably forgot to check comments.
Please understand that my comment is not specifically about you (author or
reviewer), but it's an example of what I've seen many times before.
I try to fix or reword them, especially in commit logs. But it would make
things easier if everyone could make an effort about typos and english writing.
Thanks
--
Thomas
^ permalink raw reply [flat|nested] 7+ messages in thread