DPDK patches and discussions
From: Tony Lu <zlu@ezchip.com>
To: 'Liming Sun' <lsun@ezchip.com>, <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH 2/3] driver/net/mpipe: optimize mpipe buffer return mechanism.
Date: Fri, 8 Jan 2016 11:04:53 +0800
Message-ID: <003101d149c1$687f3d20$397db760$@com>
In-Reply-To: <1450193851-9100-3-git-send-email-lsun@ezchip.com>

>-----Original Message-----
>From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Liming Sun
>Sent: Tuesday, December 15, 2015 11:38 PM
>To: dev@dpdk.org
>Subject: [dpdk-dev] [PATCH 2/3] driver/net/mpipe: optimize mpipe buffer return mechanism.
>
>This commit optimizes the mpipe buffer return mechanism. When a packet
>is received, instead of allocating and refilling the buffer stack right
>away, the driver tracks the number of pending buffers and uses the HW
>buffer return as an optimization while the pending count is below a
>certain threshold. This saves two MMIO writes and improves performance,
>especially for the bidirectional traffic case.
>
>Signed-off-by: Liming Sun <lsun@ezchip.com>
>---
> drivers/net/mpipe/mpipe_tilegx.c |   50 ++++++++++++++++++++++++++++++-------
> 1 files changed, 40 insertions(+), 10 deletions(-)
>
>diff --git a/drivers/net/mpipe/mpipe_tilegx.c b/drivers/net/mpipe/mpipe_tilegx.c
>index 35134ba..be7b6f2 100644
>--- a/drivers/net/mpipe/mpipe_tilegx.c
>+++ b/drivers/net/mpipe/mpipe_tilegx.c
>@@ -78,6 +78,13 @@ struct mpipe_context {
> 	struct mpipe_channel_config channels[MPIPE_MAX_CHANNELS];
> };
>
>+/* Per-core local data. */
>+struct mpipe_local {
>+	int mbuf_push_debt[RTE_MAX_ETHPORTS];	/* Buffer push debt. */
>+} __rte_cache_aligned;
>+
>+#define MPIPE_BUF_DEBT_THRESHOLD	32
>+static __thread struct mpipe_local mpipe_local;
> static struct mpipe_context mpipe_contexts[GXIO_MPIPE_INSTANCE_MAX];
> static int mpipe_instances;
> static const char *drivername = "MPIPE PMD";
>@@ -137,7 +144,7 @@ struct mpipe_dev_priv {
> 	int first_bucket;		/* mPIPE bucket start index. */
> 	int first_ring;			/* mPIPE notif ring start index. */
> 	int notif_group;		/* mPIPE notif group. */
>-	rte_atomic32_t dp_count;	/* Active datapath thread count. */
>+	rte_atomic32_t dp_count __rte_cache_aligned;	/* DP Entry count. */
> 	int tx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
> 	int rx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
> };
>@@ -461,6 +468,14 @@ mpipe_dp_wait(struct mpipe_dev_priv *priv)
> 	}
> }
>
>+static inline int
>+mpipe_mbuf_stack_index(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
>+{
>+	return (mbuf->port < RTE_MAX_ETHPORTS)?
>+		mpipe_priv(&rte_eth_devices[mbuf->port])->stack :
>+		priv->stack;
>+}
>+
> static inline struct rte_mbuf *
> mpipe_recv_mbuf(struct mpipe_dev_priv *priv, gxio_mpipe_idesc_t *idesc,
> 		int in_port)
>@@ -1267,6 +1282,7 @@ mpipe_do_xmit(struct mpipe_tx_queue *tx_queue, struct rte_mbuf **tx_pkts,
> 	unsigned nb_bytes = 0;
> 	unsigned nb_sent = 0;
> 	int nb_slots, i;
>+	uint8_t port_id;
>
> 	PMD_DEBUG_TX("Trying to transmit %d packets on %s:%d.\n",
> 		     nb_pkts, mpipe_name(tx_queue->q.priv),
>@@ -1315,14 +1331,23 @@ mpipe_do_xmit(struct mpipe_tx_queue *tx_queue, struct rte_mbuf **tx_pkts,
> 			if (priv->tx_comps[idx])
> 				rte_pktmbuf_free_seg(priv->tx_comps[idx]);
>
>+			port_id = (mbuf->port < RTE_MAX_ETHPORTS)?
>+						mbuf->port : priv->port_id;
> 			desc = (gxio_mpipe_edesc_t) { {
> 				.va        = rte_pktmbuf_mtod(mbuf, uintptr_t),
> 				.xfer_size = rte_pktmbuf_data_len(mbuf),
> 				.bound     = next ? 0 : 1,
>+				.stack_idx = mpipe_mbuf_stack_index(priv, mbuf),
> 			} };
>+			if (mpipe_local.mbuf_push_debt[port_id] > 0) {
>+				mpipe_local.mbuf_push_debt[port_id]--;
>+				desc.hwb = 1;
>+				priv->tx_comps[idx] = NULL;
>+			}
>+			else
>+				priv->tx_comps[idx] = mbuf;
>
> 			nb_bytes += mbuf->data_len;
>-			priv->tx_comps[idx] = mbuf;
> 			gxio_mpipe_equeue_put_at(equeue, desc, slot + i);
>
> 			PMD_DEBUG_TX("%s:%d: Sending packet %p, len %d\n",
>@@ -1443,17 +1468,22 @@ mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts,
> 				continue;
> 			}
>
>-			mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
>-			if (unlikely(!mbuf)) {
>-				nb_nomem++;
>-				gxio_mpipe_iqueue_drop(iqueue, idesc);
>-				PMD_DEBUG_RX("%s:%d: RX alloc failure\n",
>+			if (mpipe_local.mbuf_push_debt[in_port] <
>+					MPIPE_BUF_DEBT_THRESHOLD)
>+				mpipe_local.mbuf_push_debt[in_port]++;
>+			else {
>+				mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
>+				if (unlikely(!mbuf)) {
>+					nb_nomem++;
>+					gxio_mpipe_iqueue_drop(iqueue, idesc);
>+					PMD_DEBUG_RX("%s:%d: alloc failure\n",
> 					     mpipe_name(rx_queue->q.priv),
> 					     rx_queue->q.queue_idx);
>-				continue;
>-			}
>+					continue;
>+				}
>
>-			mpipe_recv_push(priv, mbuf);
>+				mpipe_recv_push(priv, mbuf);
>+			}
>
> 			/* Get and setup the mbuf for the received packet. */
> 			mbuf = mpipe_recv_mbuf(priv, idesc, in_port);
>--
>1.7.1

Acked-by: Zhigang Lu <zlu@ezchip.com>


Thread overview: 19+ messages
2015-12-15 15:37 [dpdk-dev] [PATCH 0/3] Some misc fixes and optimization for the mpipe driver Liming Sun
2015-12-15 15:37 ` [dpdk-dev] [PATCH 1/3] driver/net/mpipe: support native build on tilegx platform Liming Sun
2016-01-08  2:59   ` Tony Lu
2015-12-15 15:37 ` [dpdk-dev] [PATCH 2/3] driver/net/mpipe: optimize mpipe buffer return mechanism Liming Sun
2016-01-08  3:04   ` Tony Lu [this message]
2015-12-15 15:37 ` [dpdk-dev] [PATCH 3/3] driver/net/mpipe: fix a mpipe link initialization ordering issue Liming Sun
2016-01-08  3:08   ` Tony Lu
2016-01-08 14:30   ` [dpdk-dev] [PATCH v2 1/3] driver/net/mpipe: support native build on tilegx platform Liming Sun
2016-01-08 14:30     ` [dpdk-dev] [PATCH v2 2/3] driver/net/mpipe: optimize mpipe buffer return mechanism Liming Sun
2016-01-08 14:30     ` [dpdk-dev] [PATCH v2 3/3] driver/net/mpipe: fix a mpipe link initialization ordering issue Liming Sun
2016-02-09 15:49     ` [dpdk-dev] [PATCH v2 1/3] driver/net/mpipe: support native build on tilegx platform Bruce Richardson
2016-02-09 16:16     ` Thomas Monjalon
2016-02-09 18:37       ` Liming Sun
2016-02-09 20:33         ` Thomas Monjalon
2016-02-09 21:15           ` Liming Sun
2016-02-09 22:47             ` Thomas Monjalon
2016-02-10  9:49               ` Bruce Richardson
2016-02-10 10:00                 ` Thomas Monjalon
2016-03-08 19:48           ` Thomas Monjalon
