DPDK patches and discussions
From: Hemant Agrawal <hemant.agrawal@nxp.com>
To: "dev@dpdk.org" <dev@dpdk.org>
Cc: "ferruh.yigit@intel.com" <ferruh.yigit@intel.com>,
	Shreyansh Jain <shreyansh.jain@nxp.com>,
	Akhil Goyal <akhil.goyal@nxp.com>
Subject: [dpdk-dev] [PATCH 2/4] mempool/dpaa: bp info dynamic allocation for multiprocess
Date: Tue, 26 Mar 2019 12:01:45 +0000
Message-ID: <20190326115952.26278-2-hemant.agrawal@nxp.com>
In-Reply-To: <20190326115952.26278-1-hemant.agrawal@nxp.com>

From: Akhil Goyal <akhil.goyal@nxp.com>

Allocate rte_dpaa_bpid_info dynamically from hugepage-backed memory so
that it can be shared across processes in a multi-process setup.
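
For illustration only (not part of this patch), the pattern is roughly
the following: the primary process allocates the table once from the
DPDK hugepage heap with rte_zmalloc(), and a secondary process recovers
its per-process global pointer from a queue field that lives in shared
memory. The struct layout, the DPAA_MAX_BPOOLS value and the helper
names below are simplified placeholders, not the driver's real
definitions.

#include <errno.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_malloc.h>

#define DPAA_MAX_BPOOLS 64              /* placeholder for this sketch */

struct dpaa_bp_info {                   /* simplified stand-in */
	void *mp;
	unsigned int bpid;
	unsigned int size;
};

/* Per-process global; points to hugepage memory once allocated. */
static struct dpaa_bp_info *rte_dpaa_bpid_info;

/* Primary process: allocate once from the hugepage heap so the same
 * memory is visible to every process mapping DPDK shared memory. */
static int
bpid_info_alloc(void)
{
	if (rte_dpaa_bpid_info != NULL)
		return 0;
	rte_dpaa_bpid_info = rte_zmalloc(NULL,
			sizeof(struct dpaa_bp_info) * DPAA_MAX_BPOOLS,
			RTE_CACHE_LINE_SIZE);
	return rte_dpaa_bpid_info == NULL ? -ENOMEM : 0;
}

struct fq_sketch {                      /* stands in for struct qman_fq */
	struct dpaa_bp_info *bp_array;  /* filled in at Rx queue setup */
};

/* Secondary process: the global pointer is per-process, so on the
 * first Rx call it is restored from the queue field in shared memory. */
static inline void
bpid_info_attach(struct fq_sketch *fq)
{
	if (unlikely(rte_dpaa_bpid_info == NULL &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa_bpid_info = fq->bp_array;
}

In the patch itself the same idea appears as the rte_zmalloc() call in
dpaa_mbuf_create_pool(), the bp_array field added to struct qman_fq,
and the secondary-process check at the top of dpaa_eth_queue_rx().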

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/bus/dpaa/include/fsl_qman.h |  1 +
 drivers/mempool/dpaa/dpaa_mempool.c | 10 +++++++++-
 drivers/mempool/dpaa/dpaa_mempool.h |  4 ++--
 drivers/net/dpaa/dpaa_ethdev.c      |  1 +
 drivers/net/dpaa/dpaa_rxtx.c        |  6 +++++-
 5 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index ef598ccff..e5cccbbea 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1225,6 +1225,7 @@ struct qman_fq {
 	struct rte_event ev;
 	/* affined portal in case of static queue */
 	struct qman_portal *qp;
+	struct dpaa_bp_info *bp_array;
 
 	volatile unsigned long flags;
 
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index 021b366fe..003081772 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -35,7 +35,7 @@
 struct dpaa_memseg_list rte_dpaa_memsegs
 	= TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);
 
-struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];
+struct dpaa_bp_info *rte_dpaa_bpid_info;
 
 static int
 dpaa_mbuf_create_pool(struct rte_mempool *mp)
@@ -74,6 +74,14 @@ dpaa_mbuf_create_pool(struct rte_mempool *mp)
 		DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
 				  num_bufs, bpid);
 
+	if (rte_dpaa_bpid_info == NULL) {
+		rte_dpaa_bpid_info = (struct dpaa_bp_info *)rte_zmalloc(NULL,
+				sizeof(struct dpaa_bp_info) * DPAA_MAX_BPOOLS,
+				RTE_CACHE_LINE_SIZE);
+		if (rte_dpaa_bpid_info == NULL)
+			return -ENOMEM;
+	}
+
 	rte_dpaa_bpid_info[bpid].mp = mp;
 	rte_dpaa_bpid_info[bpid].bpid = bpid;
 	rte_dpaa_bpid_info[bpid].size = mp->elt_size;
diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h
index 533e1c6e2..f69e11f01 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.h
+++ b/drivers/mempool/dpaa/dpaa_mempool.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
- *   Copyright 2017 NXP
+ *   Copyright 2017,2019 NXP
  *
  */
 #ifndef __DPAA_MEMPOOL_H__
@@ -54,7 +54,7 @@ DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info __rte_unused, uint64_t addr)
 #define DPAA_MEMPOOL_TO_BPID(__mp) \
 	(((struct dpaa_bp_info *)__mp->pool_data)->bpid)
 
-extern struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];
+extern struct dpaa_bp_info *rte_dpaa_bpid_info;
 
 #define DPAA_BPID_TO_POOL_INFO(__bpid) (&rte_dpaa_bpid_info[__bpid])
 
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index d42ac6286..d124169c5 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -673,6 +673,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 		rxq->is_static = true;
 	}
+	rxq->bp_array = rte_dpaa_bpid_info;
 	dev->data->rx_queues[queue_idx] = rxq;
 
 	/* configure the CGR size as per the desc size */
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index c4471c227..a4085f47e 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2017 NXP
+ *   Copyright 2017,2019 NXP
  *
  */
 
@@ -598,6 +598,10 @@ uint16_t dpaa_eth_queue_rx(void *q,
 	int num_rx_bufs, ret;
 	uint32_t vdqcr_flags = 0;
 
+	if (unlikely(rte_dpaa_bpid_info == NULL &&
+				rte_eal_process_type() == RTE_PROC_SECONDARY))
+		rte_dpaa_bpid_info = fq->bp_array;
+
 	if (likely(fq->is_static))
 		return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);
 
-- 
2.17.1


Thread overview: 10+ messages

2019-03-26 12:01 [dpdk-dev] [PATCH 1/4] bus/dpaa: fq lookup table saved for secondary process Hemant Agrawal
2019-03-26 12:01 ` [dpdk-dev] [PATCH 2/4] mempool/dpaa: bp info dynamic allocation for multiprocess Hemant Agrawal [this message]
2019-03-26 12:01 ` [dpdk-dev] [PATCH 3/4] bus/dpaa: delay fman device list to bus probe Hemant Agrawal
2019-03-26 12:01 ` [dpdk-dev] [PATCH 4/4] net/dpaa2: add support for flow table flush Hemant Agrawal
2019-03-29 13:33 ` [dpdk-dev] [PATCH 1/4] bus/dpaa: fq lookup table saved for secondary process Thomas Monjalon
