From: Yanling Song <songyl@ramaxel.com>
To: <dev@dpdk.org>
Cc: <songyl@ramaxel.com>, <yanling.song@linux.dev>,
<yanggan@ramaxel.com>, <ferruh.yigit@intel.com>
Subject: [PATCH v1 24/25] net/spnic: support Tx/Rx queue start/stop
Date: Sat, 18 Dec 2021 10:51:51 +0800 [thread overview]
Message-ID: <9264e7c8882285da4980a4dda44d6690cecb5737.1639636621.git.songyl@ramaxel.com> (raw)
In-Reply-To: <cover.1639636621.git.songyl@ramaxel.com>
This commit supports starting or stopping a specified Rx/Tx queue.

For an Rx queue:
When starting an Rx queue, mbufs are allocated and the RQ WQEs are
filled with the mbuf information, then the queue id is added to the
RSS indirection table. If the first Rx queue is started, the valid
bit in the function table is set so that packets can be received by
the host.
When stopping an Rx queue, the PMD polls the queue until it is empty
and releases the mbufs, then removes the queue id from the RSS
indirection table. If the last Rx queue is stopped, the valid bit in
the function table is cleared.

For a Tx queue:
When stopping a Tx queue, the PMD waits until all Tx packets have
been sent and then releases all mbufs.
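
For reference, a minimal usage sketch of the new dev ops through the
generic ethdev API; the port id, queue id and helper name below are
illustrative only and not part of this patch:

    #include <rte_ethdev.h>

    /* Illustrative helper: stop and restart Rx/Tx queue 0 of a port.
     * rte_eth_dev_*_queue_start/stop() dispatch to the
     * .rx/tx_queue_start/stop callbacks registered by this patch.
     */
    static int toggle_queue0(uint16_t port_id)
    {
            int ret;

            ret = rte_eth_dev_rx_queue_stop(port_id, 0);
            if (ret != 0)
                    return ret;
            ret = rte_eth_dev_tx_queue_stop(port_id, 0);
            if (ret != 0)
                    return ret;

            ret = rte_eth_dev_tx_queue_start(port_id, 0);
            if (ret != 0)
                    return ret;
            return rte_eth_dev_rx_queue_start(port_id, 0);
    }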
Signed-off-by: Yanling Song <songyl@ramaxel.com>
---
drivers/net/spnic/base/spnic_nic_cfg.c | 33 ++++
drivers/net/spnic/base/spnic_nic_cfg.h | 13 ++
drivers/net/spnic/spnic_ethdev.c | 82 +++++++++
drivers/net/spnic/spnic_rx.c | 222 +++++++++++++++++++++++++
drivers/net/spnic/spnic_rx.h | 4 +
5 files changed, 354 insertions(+)
diff --git a/drivers/net/spnic/base/spnic_nic_cfg.c b/drivers/net/spnic/base/spnic_nic_cfg.c
index ce77b306db..934f11a881 100644
--- a/drivers/net/spnic/base/spnic_nic_cfg.c
+++ b/drivers/net/spnic/base/spnic_nic_cfg.c
@@ -1289,6 +1289,39 @@ int spnic_vf_get_default_cos(void *hwdev, u8 *cos_id)
return 0;
}
+int spnic_set_rq_flush(void *hwdev, u16 q_id)
+{
+ struct spnic_cmd_set_rq_flush *rq_flush_msg = NULL;
+ struct spnic_cmd_buf *cmd_buf = NULL;
+ u64 out_param = EIO;
+ int err;
+
+ cmd_buf = spnic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ cmd_buf->size = sizeof(*rq_flush_msg);
+
+ rq_flush_msg = cmd_buf->buf;
+ rq_flush_msg->local_rq_id = q_id;
+ rq_flush_msg->value = cpu_to_be32(rq_flush_msg->value);
+
+ err = spnic_cmdq_direct_resp(hwdev, SPNIC_MOD_L2NIC,
+ SPNIC_UCODE_CMD_SET_RQ_FLUSH, cmd_buf,
+ &out_param, 0);
+ if (err || out_param != 0) {
+ PMD_DRV_LOG(ERR, "Failed to set rq flush, err:%d, out_param: %"PRIu64"",
+ err, out_param);
+ err = -EFAULT;
+ }
+
+ spnic_free_cmd_buf(cmd_buf);
+
+ return err;
+}
+
static int _mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in,
u16 in_size, void *buf_out, u16 *out_size)
{
diff --git a/drivers/net/spnic/base/spnic_nic_cfg.h b/drivers/net/spnic/base/spnic_nic_cfg.h
index e5e4ffea4b..e4b4a52d32 100644
--- a/drivers/net/spnic/base/spnic_nic_cfg.h
+++ b/drivers/net/spnic/base/spnic_nic_cfg.h
@@ -1069,6 +1069,19 @@ int spnic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl);
*/
int spnic_vf_get_default_cos(void *hwdev, u8 *cos_id);
+/**
+ * Flush rx queue resource
+ *
+ * @param[in] hwdev
+ * Device pointer to hwdev
+ * @param[in] q_id
+ * rx queue id
+ *
+ * @retval zero : Success
+ * @retval non-zero : Failure
+ */
+int spnic_set_rq_flush(void *hwdev, u16 q_id);
+
/**
* Get service feature HW supported
*
diff --git a/drivers/net/spnic/spnic_ethdev.c b/drivers/net/spnic/spnic_ethdev.c
index e4db4afdfd..ff50715120 100644
--- a/drivers/net/spnic/spnic_ethdev.c
+++ b/drivers/net/spnic/spnic_ethdev.c
@@ -993,6 +993,80 @@ static void spnic_deinit_mac_addr(struct rte_eth_dev *eth_dev)
spnic_delete_mc_addr_list(nic_dev);
}
+static int spnic_dev_rx_queue_start(struct rte_eth_dev *dev,
+ uint16_t rq_id)
+{
+ struct spnic_rxq *rxq = NULL;
+ int rc;
+
+ if (rq_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rq_id];
+
+ rc = spnic_start_rq(dev, rxq);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Start rx queue failed, eth_dev:%s, queue_idx:%d",
+ dev->data->name, rq_id);
+ return rc;
+ }
+
+ dev->data->rx_queue_state[rq_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return 0;
+}
+
+static int spnic_dev_rx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t rq_id)
+{
+ struct spnic_rxq *rxq = NULL;
+ int rc;
+
+ if (rq_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rq_id];
+
+ rc = spnic_stop_rq(dev, rxq);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Stop rx queue failed, eth_dev:%s, queue_idx:%d",
+ dev->data->name, rq_id);
+ return rc;
+ }
+
+ dev->data->rx_queue_state[rq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
+static int spnic_dev_tx_queue_start(struct rte_eth_dev *dev,
+ uint16_t sq_id)
+{
+ PMD_DRV_LOG(INFO, "Start tx queue, eth_dev:%s, queue_idx:%d",
+ dev->data->name, sq_id);
+ dev->data->tx_queue_state[sq_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+}
+
+static int spnic_dev_tx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t sq_id)
+{
+ struct spnic_txq *txq = NULL;
+ int rc;
+
+ if (sq_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[sq_id];
+ rc = spnic_stop_sq(txq);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Stop tx queue failed, eth_dev:%s, queue_idx:%d",
+ dev->data->name, sq_id);
+ return rc;
+ }
+
+ dev->data->tx_queue_state[sq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
int spnic_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id)
{
@@ -2717,6 +2791,10 @@ static const struct eth_dev_ops spnic_pmd_ops = {
.tx_queue_setup = spnic_tx_queue_setup,
.rx_queue_release = spnic_rx_queue_release,
.tx_queue_release = spnic_tx_queue_release,
+ .rx_queue_start = spnic_dev_rx_queue_start,
+ .rx_queue_stop = spnic_dev_rx_queue_stop,
+ .tx_queue_start = spnic_dev_tx_queue_start,
+ .tx_queue_stop = spnic_dev_tx_queue_stop,
.rx_queue_intr_enable = spnic_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = spnic_dev_rx_queue_intr_disable,
.dev_start = spnic_dev_start,
@@ -2756,6 +2834,10 @@ static const struct eth_dev_ops spnic_pmd_vf_ops = {
.tx_queue_setup = spnic_tx_queue_setup,
.rx_queue_intr_enable = spnic_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = spnic_dev_rx_queue_intr_disable,
+ .rx_queue_start = spnic_dev_rx_queue_start,
+ .rx_queue_stop = spnic_dev_rx_queue_stop,
+ .tx_queue_start = spnic_dev_tx_queue_start,
+ .tx_queue_stop = spnic_dev_tx_queue_stop,
.dev_start = spnic_dev_start,
.link_update = spnic_link_update,
.rx_queue_release = spnic_rx_queue_release,
diff --git a/drivers/net/spnic/spnic_rx.c b/drivers/net/spnic/spnic_rx.c
index f990d10be4..cba746df88 100644
--- a/drivers/net/spnic/spnic_rx.c
+++ b/drivers/net/spnic/spnic_rx.c
@@ -486,6 +486,228 @@ void spnic_remove_rq_from_rx_queue_list(struct spnic_nic_dev *nic_dev,
nic_dev->num_rss = rss_queue_count;
}
+static void spnic_rx_queue_release_mbufs(struct spnic_rxq *rxq)
+{
+ u16 sw_ci, ci_mask, free_wqebbs;
+ u16 rx_buf_len;
+ u32 status, vlan_len, pkt_len;
+ u32 pkt_left_len = 0;
+ u32 nr_released = 0;
+ struct spnic_rx_info *rx_info;
+ volatile struct spnic_rq_cqe *rx_cqe;
+
+ sw_ci = spnic_get_rq_local_ci(rxq);
+ rx_info = &rxq->rx_info[sw_ci];
+ rx_cqe = &rxq->rx_cqe[sw_ci];
+ free_wqebbs = (u16)(spnic_get_rq_free_wqebb(rxq) + 1);
+ status = rx_cqe->status;
+ ci_mask = rxq->q_mask;
+
+ while (free_wqebbs < rxq->q_depth) {
+ rx_buf_len = rxq->buf_len;
+ if (pkt_left_len != 0) {
+ /* flush the continuation rqe of a jumbo packet */
+ pkt_left_len = (pkt_left_len <= rx_buf_len) ? 0 :
+ (pkt_left_len - rx_buf_len);
+ } else if (SPNIC_GET_RX_FLUSH(status)) {
+ /* flush one released rqe */
+ pkt_left_len = 0;
+ } else if (SPNIC_GET_RX_DONE(status)) {
+ /* flush single packet or first jumbo rqe */
+ vlan_len = rx_cqe->vlan_len;
+ pkt_len = SPNIC_GET_RX_PKT_LEN(vlan_len);
+ pkt_left_len = (pkt_len <= rx_buf_len) ? 0 :
+ (pkt_len - rx_buf_len);
+ } else {
+ break;
+ }
+
+ rte_pktmbuf_free(rx_info->mbuf);
+
+ rx_info->mbuf = NULL;
+ rx_cqe->status = 0;
+ nr_released++;
+ free_wqebbs++;
+
+ /* see next cqe */
+ sw_ci++;
+ sw_ci &= ci_mask;
+ rx_info = &rxq->rx_info[sw_ci];
+ rx_cqe = &rxq->rx_cqe[sw_ci];
+ status = rx_cqe->status;
+ }
+
+ spnic_update_rq_local_ci(rxq, nr_released);
+}
+
+int spnic_poll_rq_empty(struct spnic_rxq *rxq)
+{
+ unsigned long timeout;
+ int free_wqebb;
+ int err = -EFAULT;
+
+ timeout = msecs_to_jiffies(SPNIC_FLUSH_QUEUE_TIMEOUT) + jiffies;
+ do {
+ free_wqebb = spnic_get_rq_free_wqebb(rxq) + 1;
+ if (free_wqebb == rxq->q_depth) {
+ err = 0;
+ break;
+ }
+ spnic_rx_queue_release_mbufs(rxq);
+ rte_delay_us(1);
+ } while (time_before(jiffies, timeout));
+
+ return err;
+}
+
+void spnic_dump_cqe_status(struct spnic_rxq *rxq, u32 *cqe_done_cnt,
+ u32 *cqe_hole_cnt, u32 *head_ci,
+ u32 *head_done)
+{
+ u16 sw_ci;
+ u16 avail_pkts = 0;
+ u16 hit_done = 0;
+ u16 cqe_hole = 0;
+ u32 status;
+ volatile struct spnic_rq_cqe *rx_cqe;
+
+ sw_ci = spnic_get_rq_local_ci(rxq);
+ rx_cqe = &rxq->rx_cqe[sw_ci];
+ status = rx_cqe->status;
+ *head_done = SPNIC_GET_RX_DONE(status);
+ *head_ci = sw_ci;
+
+ for (sw_ci = 0; sw_ci < rxq->q_depth; sw_ci++) {
+ rx_cqe = &rxq->rx_cqe[sw_ci];
+
+ /* check whether the cqe at this ci is done */
+ status = rx_cqe->status;
+ if (!SPNIC_GET_RX_DONE(status) ||
+ !SPNIC_GET_RX_FLUSH(status)) {
+ if (hit_done) {
+ cqe_hole++;
+ hit_done = 0;
+ }
+
+ continue;
+ }
+
+ avail_pkts++;
+ hit_done = 1;
+ }
+
+ *cqe_done_cnt = avail_pkts;
+ *cqe_hole_cnt = cqe_hole;
+}
+
+int spnic_stop_rq(struct rte_eth_dev *eth_dev, struct spnic_rxq *rxq)
+{
+ struct spnic_nic_dev *nic_dev = rxq->nic_dev;
+ u32 cqe_done_cnt = 0;
+ u32 cqe_hole_cnt = 0;
+ u32 head_ci, head_done;
+ int err;
+
+ /* disable rxq intr */
+ spnic_dev_rx_queue_intr_disable(eth_dev, rxq->q_id);
+
+ /* lock dev queue switch */
+ rte_spinlock_lock(&nic_dev->queue_list_lock);
+
+ spnic_remove_rq_from_rx_queue_list(nic_dev, rxq->q_id);
+
+ if (nic_dev->rss_state == SPNIC_RSS_ENABLE) {
+ err = spnic_refill_indir_rqid(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Clear rq in indirect table failed, eth_dev:%s, queue_idx:%d\n",
+ nic_dev->dev_name, rxq->q_id);
+ spnic_add_rq_to_rx_queue_list(nic_dev, rxq->q_id);
+ goto set_indir_failed;
+ }
+ }
+
+ if (nic_dev->num_rss == 0) {
+ err = spnic_set_vport_enable(nic_dev->hwdev, false);
+ if (err) {
+ PMD_DRV_LOG(ERR, "%s Disable vport failed, rc:%d",
+ nic_dev->dev_name, err);
+ goto set_vport_failed;
+ }
+ }
+
+ /* unlock dev queue list switch */
+ rte_spinlock_unlock(&nic_dev->queue_list_lock);
+
+ /* Send flush rq cmd to uCode */
+ err = spnic_set_rq_flush(nic_dev->hwdev, rxq->q_id);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Flush rq failed, eth_dev:%s, queue_idx:%d\n",
+ nic_dev->dev_name, rxq->q_id);
+ goto rq_flush_failed;
+ }
+
+ err = spnic_poll_rq_empty(rxq);
+ if (err) {
+ spnic_dump_cqe_status(rxq, &cqe_done_cnt, &cqe_hole_cnt,
+ &head_ci, &head_done);
+ PMD_DRV_LOG(ERR, "Poll rq empty timeout, eth_dev:%s, queue_idx:%d, "
+ "mbuf_left:%d, cqe_done:%d, cqe_hole:%d, cqe[%d].done=%d\n",
+ nic_dev->dev_name, rxq->q_id,
+ rxq->q_depth - spnic_get_rq_free_wqebb(rxq),
+ cqe_done_cnt, cqe_hole_cnt, head_ci, head_done);
+ goto poll_rq_failed;
+ }
+
+ return 0;
+
+poll_rq_failed:
+rq_flush_failed:
+ rte_spinlock_lock(&nic_dev->queue_list_lock);
+set_vport_failed:
+ spnic_add_rq_to_rx_queue_list(nic_dev, rxq->q_id);
+ if (nic_dev->rss_state == SPNIC_RSS_ENABLE)
+ (void)spnic_refill_indir_rqid(rxq);
+set_indir_failed:
+ rte_spinlock_unlock(&nic_dev->queue_list_lock);
+ spnic_dev_rx_queue_intr_enable(eth_dev, rxq->q_id);
+ return err;
+}
+
+int spnic_start_rq(struct rte_eth_dev *eth_dev, struct spnic_rxq *rxq)
+{
+ struct spnic_nic_dev *nic_dev = rxq->nic_dev;
+ int err = 0;
+
+ /* lock dev queue switch */
+ rte_spinlock_lock(&nic_dev->queue_list_lock);
+
+ spnic_add_rq_to_rx_queue_list(nic_dev, rxq->q_id);
+
+ spnic_rearm_rxq_mbuf(rxq);
+
+ if (nic_dev->rss_state == SPNIC_RSS_ENABLE) {
+ err = spnic_refill_indir_rqid(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Refill rq to indrect table failed, eth_dev:%s, queue_idx:%d err:%d\n",
+ nic_dev->dev_name, rxq->q_id, err);
+ spnic_remove_rq_from_rx_queue_list(nic_dev, rxq->q_id);
+ }
+ }
+
+ if (rxq->nic_dev->num_rss == 1) {
+ err = spnic_set_vport_enable(nic_dev->hwdev, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "%s enable vport failed, err:%d",
+ nic_dev->dev_name, err);
+ }
+
+ /* unlock dev queue list switch */
+ rte_spinlock_unlock(&nic_dev->queue_list_lock);
+
+ spnic_dev_rx_queue_intr_enable(eth_dev, rxq->q_id);
+
+ return err;
+}
static inline uint64_t spnic_rx_vlan(uint32_t offload_type, uint32_t vlan_len,
uint16_t *vlan_tci)
diff --git a/drivers/net/spnic/spnic_rx.h b/drivers/net/spnic/spnic_rx.h
index 5ae4b5f1ab..a876f75595 100644
--- a/drivers/net/spnic/spnic_rx.h
+++ b/drivers/net/spnic/spnic_rx.h
@@ -273,6 +273,10 @@ void spnic_dump_cqe_status(struct spnic_rxq *rxq, u32 *cqe_done_cnt,
u32 *cqe_hole_cnt, u32 *head_ci,
u32 *head_done);
+int spnic_stop_rq(struct rte_eth_dev *eth_dev, struct spnic_rxq *rxq);
+
+int spnic_start_rq(struct rte_eth_dev *eth_dev, struct spnic_rxq *rxq);
+
int spnic_start_all_rqs(struct rte_eth_dev *eth_dev);
u16 spnic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts);
--
2.27.0