From: John Daley <johndale@cisco.com>
To: dev@dpdk.org
Cc: John Daley <johndale@cisco.com>
Subject: [dpdk-dev] [PATCH 08/11] enic: Tx perf - bulk recycle mbufs, refactor
Date: Fri, 20 May 2016 12:04:11 -0700 [thread overview]
Message-ID: <1463771054-16861-8-git-send-email-johndale@cisco.com> (raw)
In-Reply-To: <1463771054-16861-1-git-send-email-johndale@cisco.com>
Mbufs were returned to the pool one at a time. Use rte_mempool_put_bulk
instead. There were multiple function calls for each buffer returned.
Refactor this code into just 2 functions.
Signed-off-by: John Daley <johndale@cisco.com>
---
drivers/net/enic/base/vnic_wq.h | 27 ---------------------
drivers/net/enic/enic_rxtx.c | 54 ++++++++++++++++++++++++++---------------
2 files changed, 35 insertions(+), 46 deletions(-)
diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
index fe46bb4..689b81c 100644
--- a/drivers/net/enic/base/vnic_wq.h
+++ b/drivers/net/enic/base/vnic_wq.h
@@ -177,33 +177,6 @@ buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
return idx;
}
-static inline void vnic_wq_service(struct vnic_wq *wq,
- struct cq_desc *cq_desc, u16 completed_index,
- void (*buf_service)(struct vnic_wq *wq,
- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
- void *opaque)
-{
- struct vnic_wq_buf *buf;
- unsigned int to_clean = wq->tail_idx;
-
- buf = &wq->bufs[to_clean];
- while (1) {
-
- (*buf_service)(wq, cq_desc, buf, opaque);
-
- wq->ring.desc_avail++;
-
-
- to_clean = buf_idx_incr(wq->ring.desc_count, to_clean);
-
- if (to_clean == completed_index)
- break;
-
- buf = &wq->bufs[to_clean];
- }
- wq->tail_idx = to_clean;
-}
-
void vnic_wq_free(struct vnic_wq *wq);
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 24b7190..8bf7640 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -332,33 +332,49 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
-static void enic_wq_free_buf(struct vnic_wq *wq,
- __rte_unused struct cq_desc *cq_desc,
- struct vnic_wq_buf *buf,
- __rte_unused void *opaque)
+static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
- enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
+ struct vnic_wq_buf *buf;
+ struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
+ unsigned int nb_to_free, nb_free = 0, i;
+ struct rte_mempool *pool;
+ unsigned int tail_idx;
+ unsigned int desc_count = wq->ring.desc_count;
+
+ nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
+ + 1;
+ tail_idx = wq->tail_idx;
+ buf = &wq->bufs[tail_idx];
+ pool = ((struct rte_mbuf *)buf->mb)->pool;
+ for (i = 0; i < nb_to_free; i++) {
+ buf = &wq->bufs[tail_idx];
+ m = (struct rte_mbuf *)(buf->mb);
+ if (likely(m->pool == pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(pool, (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ pool = m->pool;
+ }
+ tail_idx = enic_ring_incr(desc_count, tail_idx);
+ buf->mb = NULL;
+ }
- vnic_wq_service(&enic->wq[q_number], cq_desc,
- completed_index, enic_wq_free_buf,
- opaque);
+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
- return 0;
+ wq->tail_idx = tail_idx;
+ wq->ring.desc_avail += nb_to_free;
}
-unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
+unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
- u16 completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
+ u16 completed_index;
+
+ completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
if (wq->last_completed_index != completed_index) {
- enic_wq_service(enic->vdev, NULL, 0, wq->index,
- completed_index, NULL);
+ enic_free_wq_bufs(wq, completed_index);
wq->last_completed_index = completed_index;
}
return 0;
--
2.7.0
next prev parent reply other threads:[~2016-05-20 19:05 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-05-20 19:04 [dpdk-dev] [PATCH 01/11] enic: fix Rx drop counters John Daley
2016-05-20 19:04 ` [dpdk-dev] [PATCH 02/11] enic: drop bad packets, remove unused rx error flag John Daley
2016-05-20 19:04 ` [dpdk-dev] [PATCH 03/11] enic: count truncated packets John Daley
2016-05-20 19:04 ` [dpdk-dev] [PATCH 04/11] enic: Tx cleanup - put Tx and Rx functions into same file John Daley
2016-05-20 19:04 ` [dpdk-dev] [PATCH 05/11] enic: Tx cleanup - remove some unused functions John Daley
2016-05-20 19:04 ` [dpdk-dev] [PATCH 06/11] enic: Tx perf - improve processing of mbufs held by driver John Daley
2016-05-20 19:04 ` [dpdk-dev] [PATCH 07/11] enic: Tx perf - use completion message instead of completion queue John Daley
2016-05-20 19:04 ` John Daley [this message]
2016-05-20 19:04 ` [dpdk-dev] [PATCH 09/11] enic: Tx perf - optimize the transmit function John Daley
2016-05-20 19:04 ` [dpdk-dev] [PATCH 10/11] enic: Tx cleanup - remove unused files, functions, variables John Daley
2016-05-20 19:04 ` [dpdk-dev] [PATCH 11/11] enic: add ENIC_ASSERT macro John Daley
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1463771054-16861-8-git-send-email-johndale@cisco.com \
--to=johndale@cisco.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).