From: Yong Wang <yongwang@vmware.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 3/5] vmxnet3: Fix dev stop/restart bug
Date: Sun, 12 Oct 2014 23:23:07 -0700
Message-ID: <1413181389-14887-4-git-send-email-yongwang@vmware.com>
In-Reply-To: <1413181389-14887-1-git-send-email-yongwang@vmware.com>
This change makes vmxnet3 consistent with other PMDs in
terms of dev_stop behavior: rather than releasing the tx/rx
rings, it only resets the ring structures and releases the
pending mbufs.

Verified with various tests (testpmd and pktgen) over
vmxnet3 that dev stop/restart works fine.
Signed-off-by: Yong Wang <yongwang@vmware.com>
---
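For context (not part of the patch): a minimal sketch of the stop/restart
sequence this change is meant to support, assuming a port that has already
been configured and had its queues set up; error handling is omitted and the
helper name is made up for illustration.

#include <rte_ethdev.h>

/* Illustrative only: stop a running port and start it again without
 * re-doing queue setup.  With this patch, vmxnet3's dev_stop path only
 * resets the rings and frees the pending mbufs, so the restart below
 * can reuse the queues that are still allocated. */
static int
restart_port(uint8_t port_id)
{
	rte_eth_dev_stop(port_id);
	return rte_eth_dev_start(port_id);
}

The same sequence can be exercised interactively from testpmd with
"port stop <port>" followed by "port start <port>", which is roughly what
the verification above did.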
lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c | 78 ++++++++++++++++++++++++++++++++---
1 file changed, 73 insertions(+), 5 deletions(-)
diff --git a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
index 0b6363f..2017d4b 100644
--- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
@@ -157,7 +157,7 @@ vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
 #endif
 
 static inline void
-vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
+vmxnet3_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
 {
 	while (ring->next2comp != ring->next2fill) {
 		/* No need to worry about tx desc ownership, device is quiesced by now. */
@@ -171,16 +171,23 @@ vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
 		}
 		vmxnet3_cmd_ring_adv_next2comp(ring);
 	}
+}
+
+static void
+vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
+{
+	vmxnet3_cmd_ring_release_mbufs(ring);
 	rte_free(ring->buf_info);
 	ring->buf_info = NULL;
 }
 
+
 void
 vmxnet3_dev_tx_queue_release(void *txq)
 {
 	vmxnet3_tx_queue_t *tq = txq;
 
-	if (txq != NULL) {
+	if (tq != NULL) {
 		/* Release the cmd_ring */
 		vmxnet3_cmd_ring_release(&tq->cmd_ring);
 	}
@@ -192,13 +199,74 @@ vmxnet3_dev_rx_queue_release(void *rxq)
 	int i;
 	vmxnet3_rx_queue_t *rq = rxq;
 
-	if (rxq != NULL) {
+	if (rq != NULL) {
 		/* Release both the cmd_rings */
 		for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
 			vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
 	}
 }
 
+static void
+vmxnet3_dev_tx_queue_reset(void *txq)
+{
+	vmxnet3_tx_queue_t *tq = txq;
+	struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
+	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
+	int size;
+
+	if (tq != NULL) {
+		/* Release the cmd_ring mbufs */
+		vmxnet3_cmd_ring_release_mbufs(&tq->cmd_ring);
+	}
+
+	/* Tx vmxnet rings structure initialization*/
+	ring->next2fill = 0;
+	ring->next2comp = 0;
+	ring->gen = VMXNET3_INIT_GEN;
+	comp_ring->next2proc = 0;
+	comp_ring->gen = VMXNET3_INIT_GEN;
+
+	size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
+	size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
+
+	memset(ring->base, 0, size);
+}
+
+static void
+vmxnet3_dev_rx_queue_reset(void *rxq)
+{
+	int i;
+	vmxnet3_rx_queue_t *rq = rxq;
+	struct vmxnet3_cmd_ring *ring0, *ring1;
+	struct vmxnet3_comp_ring *comp_ring;
+	int size;
+
+	if (rq != NULL) {
+		/* Release both the cmd_rings mbufs */
+		for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+			vmxnet3_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
+	}
+
+	ring0 = &rq->cmd_ring[0];
+	ring1 = &rq->cmd_ring[1];
+	comp_ring = &rq->comp_ring;
+
+	/* Rx vmxnet rings structure initialization */
+	ring0->next2fill = 0;
+	ring1->next2fill = 0;
+	ring0->next2comp = 0;
+	ring1->next2comp = 0;
+	ring0->gen = VMXNET3_INIT_GEN;
+	ring1->gen = VMXNET3_INIT_GEN;
+	comp_ring->next2proc = 0;
+	comp_ring->gen = VMXNET3_INIT_GEN;
+
+	size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
+	size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+
+	memset(ring0->base, 0, size);
+}
+
 void
 vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
 {
@@ -211,7 +279,7 @@ vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
 
 		if (txq != NULL) {
 			txq->stopped = TRUE;
-			vmxnet3_dev_tx_queue_release(txq);
+			vmxnet3_dev_tx_queue_reset(txq);
 		}
 	}
 
@@ -220,7 +288,7 @@ vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
 
 		if (rxq != NULL) {
 			rxq->stopped = TRUE;
-			vmxnet3_dev_rx_queue_release(rxq);
+			vmxnet3_dev_rx_queue_reset(rxq);
 		}
 	}
 }
--
1.9.1