DPDK patches and discussions
* [dpdk-dev] [PATCH] mbuf: use pktmbuf helper to create the pool
@ 2017-01-17 18:42 Hemant Agrawal
  2017-01-17 13:31 ` Santosh Shukla
  2017-01-20  7:11 ` [dpdk-dev] [PATCH v2] " Hemant Agrawal
  0 siblings, 2 replies; 12+ messages in thread
From: Hemant Agrawal @ 2017-01-17 18:42 UTC (permalink / raw)
  To: olivier.matz; +Cc: dev

When possible, replace the uses of rte_mempool_create() with
the helper provided in librte_mbuf: rte_pktmbuf_pool_create().

This is the preferred way to create a mbuf pool.

This also updates the documentation.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
This patch is derived from the RFC from Olivier:
http://dpdk.org/dev/patchwork/patch/15925/
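
For reference, a sketch of the transformation applied throughout this patch
(the names NB_MBUF, MBUF_DATA_SIZE and MBUF_CACHE_SIZE are illustrative, not
taken from any single file; MBUF_DATA_SIZE includes RTE_PKTMBUF_HEADROOM):

    /* Before: generic mempool API; the caller sizes the element by hand
     * and wires up the pktmbuf constructors explicitly. */
    mp = rte_mempool_create("MBUF_POOL", NB_MBUF,
            MBUF_DATA_SIZE + sizeof(struct rte_mbuf),
            MBUF_CACHE_SIZE,
            sizeof(struct rte_pktmbuf_pool_private),
            rte_pktmbuf_pool_init, NULL,
            rte_pktmbuf_init, NULL,
            rte_socket_id(), 0);

    /* After: the helper computes the element size and applies the default
     * pool and mbuf constructors internally; the element size argument
     * becomes a data room size. */
    mp = rte_pktmbuf_pool_create("MBUF_POOL", NB_MBUF,
            MBUF_CACHE_SIZE,
            0,               /* priv_size */
            MBUF_DATA_SIZE,  /* data room size */
            rte_socket_id());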

 app/test/test_link_bonding_rssconf.c               | 11 ++++----
 doc/guides/sample_app_ug/ip_reassembly.rst         | 13 +++++----
 doc/guides/sample_app_ug/ipv4_multicast.rst        | 12 ++++----
 doc/guides/sample_app_ug/l2_forward_job_stats.rst  | 33 ++++++++--------------
 .../sample_app_ug/l2_forward_real_virtual.rst      | 26 +++++++----------
 doc/guides/sample_app_ug/ptpclient.rst             | 11 ++------
 doc/guides/sample_app_ug/quota_watermark.rst       | 26 ++++++-----------
 drivers/net/bonding/rte_eth_bond_8023ad.c          | 13 ++++-----
 examples/ip_pipeline/init.c                        | 18 ++++++------
 examples/ip_reassembly/main.c                      | 16 +++++------
 examples/multi_process/l2fwd_fork/main.c           | 13 +++------
 examples/tep_termination/main.c                    | 16 +++++------
 lib/librte_mbuf/rte_mbuf.c                         |  7 +++--
 lib/librte_mbuf/rte_mbuf.h                         | 29 +++++++++++--------
 14 files changed, 106 insertions(+), 138 deletions(-)

diff --git a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c
index 34f1c16..9034f62 100644
--- a/app/test/test_link_bonding_rssconf.c
+++ b/app/test/test_link_bonding_rssconf.c
@@ -67,7 +67,7 @@
 #define SLAVE_RXTX_QUEUE_FMT      ("rssconf_slave%d_q%d")
 
 #define NUM_MBUFS 8191
-#define MBUF_SIZE (1600 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_SIZE (1600 + RTE_PKTMBUF_HEADROOM)
 #define MBUF_CACHE_SIZE 250
 #define BURST_SIZE 32
 
@@ -536,13 +536,12 @@ struct link_bonding_rssconf_unittest_params {
 
 	if (test_params.mbuf_pool == NULL) {
 
-		test_params.mbuf_pool = rte_mempool_create("RSS_MBUF_POOL", NUM_MBUFS *
-				SLAVE_COUNT, MBUF_SIZE, MBUF_CACHE_SIZE,
-				sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
-				NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
+		test_params.mbuf_pool = rte_pktmbuf_pool_create(
+			"RSS_MBUF_POOL", NUM_MBUFS * SLAVE_COUNT,
+			MBUF_CACHE_SIZE, 0, MBUF_SIZE, rte_socket_id());
 
 		TEST_ASSERT(test_params.mbuf_pool != NULL,
-				"rte_mempool_create failed\n");
+				"rte_pktmbuf_pool_create failed\n");
 	}
 
 	/* Create / initialize ring eth devs. */
diff --git a/doc/guides/sample_app_ug/ip_reassembly.rst b/doc/guides/sample_app_ug/ip_reassembly.rst
index 3c5cc70..d5097c6 100644
--- a/doc/guides/sample_app_ug/ip_reassembly.rst
+++ b/doc/guides/sample_app_ug/ip_reassembly.rst
@@ -223,11 +223,14 @@ each RX queue uses its own mempool.
 
     snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);
 
-    if ((rxq->pool = rte_mempool_create(buf, nb_mbuf, MBUF_SIZE, 0, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL,
-        rte_pktmbuf_init, NULL, socket, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) == NULL) {
-
-            RTE_LOG(ERR, IP_RSMBL, "mempool_create(%s) failed", buf);
-            return -1;
+    rxq->pool = rte_pktmbuf_pool_create(buf, nb_mbuf,
+    	0, /* cache size */
+    	0, /* priv size */
+    	MBUF_DATA_SIZE, socket);
+    if (rxq->pool == NULL) {
+    	RTE_LOG(ERR, IP_RSMBL,
+    		"rte_pktmbuf_pool_create(%s) failed", buf);
+    	return -1;
     }
 
 Packet Reassembly and Forwarding
diff --git a/doc/guides/sample_app_ug/ipv4_multicast.rst b/doc/guides/sample_app_ug/ipv4_multicast.rst
index 72da8c4..d9ff249 100644
--- a/doc/guides/sample_app_ug/ipv4_multicast.rst
+++ b/doc/guides/sample_app_ug/ipv4_multicast.rst
@@ -145,12 +145,12 @@ Memory pools for indirect buffers are initialized differently from the memory po
 
 .. code-block:: c
 
-    packet_pool = rte_mempool_create("packet_pool", NB_PKT_MBUF, PKT_MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private),
-                                     rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
-
-    header_pool = rte_mempool_create("header_pool", NB_HDR_MBUF, HDR_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
-    clone_pool = rte_mempool_create("clone_pool", NB_CLONE_MBUF,
-    CLONE_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
+    packet_pool = rte_pktmbuf_pool_create("packet_pool", NB_PKT_MBUF, 32,
+    	0, PKT_MBUF_DATA_SIZE, rte_socket_id());
+    header_pool = rte_pktmbuf_pool_create("header_pool", NB_HDR_MBUF, 32,
+    	0, HDR_MBUF_DATA_SIZE, rte_socket_id());
+    clone_pool = rte_pktmbuf_pool_create("clone_pool", NB_CLONE_MBUF, 32,
+    	0, 0, rte_socket_id());
 
 The reason for this is because indirect buffers are not supposed to hold any packet data and
 therefore can be initialized with lower amount of reserved memory for each buffer.
diff --git a/doc/guides/sample_app_ug/l2_forward_job_stats.rst b/doc/guides/sample_app_ug/l2_forward_job_stats.rst
index 2444e36..a606b86 100644
--- a/doc/guides/sample_app_ug/l2_forward_job_stats.rst
+++ b/doc/guides/sample_app_ug/l2_forward_job_stats.rst
@@ -193,36 +193,25 @@ and the application to store network packet data:
 .. code-block:: c
 
     /* create the mbuf pool */
-    l2fwd_pktmbuf_pool =
-        rte_mempool_create("mbuf_pool", NB_MBUF,
-                   MBUF_SIZE, 32,
-                   sizeof(struct rte_pktmbuf_pool_private),
-                   rte_pktmbuf_pool_init, NULL,
-                   rte_pktmbuf_init, NULL,
-                   rte_socket_id(), 0);
+    l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
+    	MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+    	rte_socket_id());
 
     if (l2fwd_pktmbuf_pool == NULL)
         rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
 
 The rte_mempool is a generic structure used to handle pools of objects.
-In this case, it is necessary to create a pool that will be used by the driver,
-which expects to have some reserved space in the mempool structure,
-sizeof(struct rte_pktmbuf_pool_private) bytes.
-The number of allocated pkt mbufs is NB_MBUF, with a size of MBUF_SIZE each.
-A per-lcore cache of 32 mbufs is kept.
+In this case, it is necessary to create a pool that will be used by the driver.
+The number of allocated pkt mbufs is NB_MBUF, with a data room size of
+RTE_MBUF_DEFAULT_BUF_SIZE each.
+A per-lcore cache of MEMPOOL_CACHE_SIZE mbufs is kept.
 The memory is allocated in rte_socket_id() socket,
 but it is possible to extend this code to allocate one mbuf pool per socket.
 
-Two callback pointers are also given to the rte_mempool_create() function:
-
-*   The first callback pointer is to rte_pktmbuf_pool_init() and is used
-    to initialize the private data of the mempool, which is needed by the driver.
-    This function is provided by the mbuf API, but can be copied and extended by the developer.
-
-*   The second callback pointer given to rte_mempool_create() is the mbuf initializer.
-    The default is used, that is, rte_pktmbuf_init(), which is provided in the rte_mbuf library.
-    If a more complex application wants to extend the rte_pktmbuf structure for its own needs,
-    a new function derived from rte_pktmbuf_init( ) can be created.
+The rte_pktmbuf_pool_create() function uses the default mbuf pool and mbuf
+initializers, respectively rte_pktmbuf_pool_init() and rte_pktmbuf_init().
+An advanced application may want to use the mempool API to create the
+mbuf pool with more control.
 
 Driver Initialization
 ~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
index cf15d1c..de86ac8 100644
--- a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
+++ b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
@@ -207,31 +207,25 @@ and the application to store network packet data:
 
     /* create the mbuf pool */
 
-    l2fwd_pktmbuf_pool = rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private),
-        rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, SOCKET0, 0);
+    l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
+    	MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+    	rte_socket_id());
 
     if (l2fwd_pktmbuf_pool == NULL)
         rte_panic("Cannot init mbuf pool\n");
 
 The rte_mempool is a generic structure used to handle pools of objects.
-In this case, it is necessary to create a pool that will be used by the driver,
-which expects to have some reserved space in the mempool structure,
-sizeof(struct rte_pktmbuf_pool_private) bytes.
-The number of allocated pkt mbufs is NB_MBUF, with a size of MBUF_SIZE each.
+In this case, it is necessary to create a pool that will be used by the driver.
+The number of allocated pkt mbufs is NB_MBUF, with a data room size of
+RTE_MBUF_DEFAULT_BUF_SIZE each.
 A per-lcore cache of 32 mbufs is kept.
 The memory is allocated in NUMA socket 0,
 but it is possible to extend this code to allocate one mbuf pool per socket.
 
-Two callback pointers are also given to the rte_mempool_create() function:
-
-*   The first callback pointer is to rte_pktmbuf_pool_init() and is used
-    to initialize the private data of the mempool, which is needed by the driver.
-    This function is provided by the mbuf API, but can be copied and extended by the developer.
-
-*   The second callback pointer given to rte_mempool_create() is the mbuf initializer.
-    The default is used, that is, rte_pktmbuf_init(), which is provided in the rte_mbuf library.
-    If a more complex application wants to extend the rte_pktmbuf structure for its own needs,
-    a new function derived from rte_pktmbuf_init( ) can be created.
+The rte_pktmbuf_pool_create() function uses the default mbuf pool and mbuf
+initializers, respectively rte_pktmbuf_pool_init() and rte_pktmbuf_init().
+An advanced application may want to use the mempool API to create the
+mbuf pool with more control.
 
 .. _l2_fwd_app_dvr_init:
 
diff --git a/doc/guides/sample_app_ug/ptpclient.rst b/doc/guides/sample_app_ug/ptpclient.rst
index 6e425b7..405a267 100644
--- a/doc/guides/sample_app_ug/ptpclient.rst
+++ b/doc/guides/sample_app_ug/ptpclient.rst
@@ -171,15 +171,8 @@ used by the application:
 
 .. code-block:: c
 
-    mbuf_pool = rte_mempool_create("MBUF_POOL",
-                                   NUM_MBUFS * nb_ports,
-                                   MBUF_SIZE,
-                                   MBUF_CACHE_SIZE,
-                                   sizeof(struct rte_pktmbuf_pool_private),
-                                   rte_pktmbuf_pool_init, NULL,
-                                   rte_pktmbuf_init,      NULL,
-                                   rte_socket_id(),
-                                   0);
+    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
+    	MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 
 Mbufs are the packet buffer structure used by DPDK. They are explained in
 detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
diff --git a/doc/guides/sample_app_ug/quota_watermark.rst b/doc/guides/sample_app_ug/quota_watermark.rst
index c56683a..a0da8fe 100644
--- a/doc/guides/sample_app_ug/quota_watermark.rst
+++ b/doc/guides/sample_app_ug/quota_watermark.rst
@@ -254,32 +254,24 @@ It contains a set of mbuf objects that are used by the driver and the applicatio
 .. code-block:: c
 
     /* Create a pool of mbuf to store packets */
-
-    mbuf_pool = rte_mempool_create("mbuf_pool", MBUF_PER_POOL, MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private),
-        rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
+    mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
+    	MBUF_DATA_SIZE, rte_socket_id());
 
     if (mbuf_pool == NULL)
         rte_panic("%s\n", rte_strerror(rte_errno));
 
 The rte_mempool is a generic structure used to handle pools of objects.
-In this case, it is necessary to create a pool that will be used by the driver,
-which expects to have some reserved space in the mempool structure, sizeof(struct rte_pktmbuf_pool_private) bytes.
+In this case, it is necessary to create a pool that will be used by the driver.
 
-The number of allocated pkt mbufs is MBUF_PER_POOL, with a size of MBUF_SIZE each.
+The number of allocated pkt mbufs is MBUF_PER_POOL, with a data room size
+of MBUF_DATA_SIZE each.
 A per-lcore cache of 32 mbufs is kept.
 The memory is allocated in on the master lcore's socket, but it is possible to extend this code to allocate one mbuf pool per socket.
 
-Two callback pointers are also given to the rte_mempool_create() function:
-
-*   The first callback pointer is to rte_pktmbuf_pool_init() and is used to initialize the private data of the mempool,
-    which is needed by the driver.
-    This function is provided by the mbuf API, but can be copied and extended by the developer.
-
-*   The second callback pointer given to rte_mempool_create() is the mbuf initializer.
-
-The default is used, that is, rte_pktmbuf_init(), which is provided in the rte_mbuf library.
-If a more complex application wants to extend the rte_pktmbuf structure for its own needs,
-a new function derived from rte_pktmbuf_init() can be created.
+The rte_pktmbuf_pool_create() function uses the default mbuf pool and mbuf
+initializers, respectively rte_pktmbuf_pool_init() and rte_pktmbuf_init().
+An advanced application may want to use the mempool API to create the
+mbuf pool with more control.
 
 Ports Configuration and Pairing
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 2f7ae70..af211ca 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -888,8 +888,8 @@
 	RTE_ASSERT(port->tx_ring == NULL);
 	socket_id = rte_eth_devices[slave_id].data->numa_node;
 
-	element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf)
-				+ RTE_PKTMBUF_HEADROOM;
+	element_size = sizeof(struct slow_protocol_frame) +
+		RTE_PKTMBUF_HEADROOM;
 
 	/* The size of the mempool should be at least:
 	 * the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */
@@ -900,11 +900,10 @@
 	}
 
 	snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
-	port->mbuf_pool = rte_mempool_create(mem_name,
-		total_tx_desc, element_size,
-		RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ? 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
-		sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
-		NULL, rte_pktmbuf_init, NULL, socket_id, MEMPOOL_F_NO_SPREAD);
+	port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc,
+		RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
+			32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
+		0, element_size, socket_id);
 
 	/* Any memory allocation failure in initalization is critical because
 	 * resources can't be free, so reinitialization is impossible. */
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index 3b36b53..d55c3b4 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -324,16 +324,14 @@
 		struct app_mempool_params *p = &app->mempool_params[i];
 
 		APP_LOG(app, HIGH, "Initializing %s ...", p->name);
-		app->mempool[i] = rte_mempool_create(
-				p->name,
-				p->pool_size,
-				p->buffer_size,
-				p->cache_size,
-				sizeof(struct rte_pktmbuf_pool_private),
-				rte_pktmbuf_pool_init, NULL,
-				rte_pktmbuf_init, NULL,
-				p->cpu_socket_id,
-				0);
+		app->mempool[i] = rte_pktmbuf_pool_create(
+			p->name,
+			p->pool_size,
+			p->cache_size,
+			0, /* priv_size */
+			p->buffer_size -
+				sizeof(struct rte_mbuf), /* mbuf data size */
+			p->cpu_socket_id);
 
 		if (app->mempool[i] == NULL)
 			rte_panic("%s init error\n", p->name);
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 50fe422..f6378bf 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -84,9 +84,7 @@
 
 #define MAX_JUMBO_PKT_LEN  9600
 
-#define	BUF_SIZE	RTE_MBUF_DEFAULT_DATAROOM
-#define MBUF_SIZE	\
-	(BUF_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define	MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE
 
 #define NB_MBUF 8192
 
@@ -909,11 +907,13 @@ struct rte_lpm6_config lpm6_config = {
 
 	snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);
 
-	if ((rxq->pool = rte_mempool_create(buf, nb_mbuf, MBUF_SIZE, 0,
-			sizeof(struct rte_pktmbuf_pool_private),
-			rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
-			socket, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) == NULL) {
-		RTE_LOG(ERR, IP_RSMBL, "mempool_create(%s) failed", buf);
+	rxq->pool = rte_pktmbuf_pool_create(buf, nb_mbuf,
+		0, /* cache size */
+		0, /* priv size */
+		MBUF_DATA_SIZE, socket);
+	if (rxq->pool == NULL) {
+		RTE_LOG(ERR, IP_RSMBL,
+			"rte_pktmbuf_pool_create(%s) failed", buf);
 		return -1;
 	}
 
diff --git a/examples/multi_process/l2fwd_fork/main.c b/examples/multi_process/l2fwd_fork/main.c
index 2d951d9..b34916e 100644
--- a/examples/multi_process/l2fwd_fork/main.c
+++ b/examples/multi_process/l2fwd_fork/main.c
@@ -77,8 +77,7 @@
 
 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
 #define MBUF_NAME	"mbuf_pool_%d"
-#define MBUF_SIZE	\
-(RTE_MBUF_DEFAULT_DATAROOM + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE
 #define NB_MBUF   8192
 #define RING_MASTER_NAME	"l2fwd_ring_m2s_"
 #define RING_SLAVE_NAME		"l2fwd_ring_s2m_"
@@ -989,14 +988,10 @@ struct l2fwd_port_statistics {
 		flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
 		snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, portid);
 		l2fwd_pktmbuf_pool[portid] =
-			rte_mempool_create(buf_name, NB_MBUF,
-					   MBUF_SIZE, 32,
-					   sizeof(struct rte_pktmbuf_pool_private),
-					   rte_pktmbuf_pool_init, NULL,
-					   rte_pktmbuf_init, NULL,
-					   rte_socket_id(), flags);
+			rte_pktmbuf_pool_create(buf_name, NB_MBUF, 32,
+				0, MBUF_DATA_SIZE, rte_socket_id());
 		if (l2fwd_pktmbuf_pool[portid] == NULL)
-			rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+			rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
 
 		printf("Create mbuf %s\n", buf_name);
 	}
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index bd1dc96..20dafdb 100644
--- a/examples/tep_termination/main.c
+++ b/examples/tep_termination/main.c
@@ -68,7 +68,7 @@
 				(nb_switching_cores * MBUF_CACHE_SIZE))
 
 #define MBUF_CACHE_SIZE 128
-#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
 
 #define MAX_PKT_BURST 32	/* Max burst size for RX/TX */
 #define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */
@@ -1199,15 +1199,13 @@ static inline void __attribute__((always_inline))
 			MAX_SUP_PORTS);
 	}
 	/* Create the mbuf pool. */
-	mbuf_pool = rte_mempool_create(
+	mbuf_pool = rte_pktmbuf_pool_create(
 			"MBUF_POOL",
-			NUM_MBUFS_PER_PORT
-			* valid_nb_ports,
-			MBUF_SIZE, MBUF_CACHE_SIZE,
-			sizeof(struct rte_pktmbuf_pool_private),
-			rte_pktmbuf_pool_init, NULL,
-			rte_pktmbuf_init, NULL,
-			rte_socket_id(), 0);
+			NUM_MBUFS_PER_PORT * valid_nb_ports,
+			MBUF_CACHE_SIZE,
+			0,
+			MBUF_DATA_SIZE,
+			rte_socket_id());
 	if (mbuf_pool == NULL)
 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
 
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index 72ad91e..3fb2700 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -62,7 +62,7 @@
 
 /*
  * ctrlmbuf constructor, given as a callback function to
- * rte_mempool_create()
+ * rte_mempool_obj_iter() or rte_mempool_create()
  */
 void
 rte_ctrlmbuf_init(struct rte_mempool *mp,
@@ -77,7 +77,8 @@
 
 /*
  * pktmbuf pool constructor, given as a callback function to
- * rte_mempool_create()
+ * rte_mempool_create(), or called directly if using
+ * rte_mempool_create_empty()/rte_mempool_populate()
  */
 void
 rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
@@ -110,7 +111,7 @@
 
 /*
  * pktmbuf constructor, given as a callback function to
- * rte_mempool_create().
+ * rte_mempool_obj_iter() or rte_mempool_create().
  * Set the fields of a packet mbuf to their default values.
  */
 void
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index bfce9f4..b1d4ccb 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -44,6 +44,13 @@
  * buffers. The message buffers are stored in a mempool, using the
  * RTE mempool library.
  *
+ * The preferred way to create a mbuf pool is to use
+ * rte_pktmbuf_pool_create(). However, in some situations, an
+ * application may want to have more control (ex: populate the pool with
+ * specific memory), in this case it is possible to use functions from
+ * rte_mempool. See how rte_pktmbuf_pool_create() is implemented for
+ * details.
+ *
  * This library provides an API to allocate/free packet mbufs, which are
  * used to carry network packets.
  *
@@ -810,14 +817,14 @@ static inline void __attribute__((always_inline))
  * This function initializes some fields in an mbuf structure that are
  * not modified by the user once created (mbuf type, origin pool, buffer
  * start address, and so on). This function is given as a callback function
- * to rte_mempool_create() at pool creation time.
+ * to rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
  *
  * @param mp
  *   The mempool from which the mbuf is allocated.
  * @param opaque_arg
  *   A pointer that can be used by the user to retrieve useful information
- *   for mbuf initialization. This pointer comes from the ``init_arg``
- *   parameter of rte_mempool_create().
+ *   for mbuf initialization. This pointer is the opaque argument passed to
+ *   rte_mempool_obj_iter() or rte_mempool_create().
  * @param m
  *   The mbuf to initialize.
  * @param i
@@ -891,14 +898,14 @@ void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
  * This function initializes some fields in the mbuf structure that are
  * not modified by the user once created (origin pool, buffer start
  * address, and so on). This function is given as a callback function to
- * rte_mempool_create() at pool creation time.
+ * rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
  *
  * @param mp
  *   The mempool from which mbufs originate.
  * @param opaque_arg
  *   A pointer that can be used by the user to retrieve useful information
- *   for mbuf initialization. This pointer comes from the ``init_arg``
- *   parameter of rte_mempool_create().
+ *   for mbuf initialization. This pointer is the opaque argument passed to
+ *   rte_mempool_obj_iter() or rte_mempool_create().
  * @param m
  *   The mbuf to initialize.
  * @param i
@@ -913,7 +920,8 @@ void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
  *
  * This function initializes the mempool private data in the case of a
  * pktmbuf pool. This private data is needed by the driver. The
- * function is given as a callback function to rte_mempool_create() at
+ * function must be called on the mempool before it is used, or it
+ * can be given as a callback function to rte_mempool_create() at
  * pool creation. It can be extended by the user, for example, to
  * provide another packet size.
  *
@@ -921,8 +929,8 @@ void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
  *   The mempool from which mbufs originate.
  * @param opaque_arg
  *   A pointer that can be used by the user to retrieve useful information
- *   for mbuf initialization. This pointer comes from the ``init_arg``
- *   parameter of rte_mempool_create().
+ *   for mbuf initialization. This pointer is the opaque argument passed to
+ *   rte_mempool_create().
  */
 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
 
@@ -930,8 +938,7 @@ void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
  * Create a mbuf pool.
  *
  * This function creates and initializes a packet mbuf pool. It is
- * a wrapper to rte_mempool_create() with the proper packet constructor
- * and mempool constructor.
+ * a wrapper to rte_mempool functions.
  *
  * @param name
  *   The name of the mbuf pool.
-- 
1.9.1

* [dpdk-dev] [PATCHv4 00/33]  NXP DPAA2 PMD
@ 2017-01-17 18:52 Hemant Agrawal
  2017-01-19 13:23 ` [dpdk-dev] [PATCHv5 " Hemant Agrawal
  0 siblings, 1 reply; 12+ messages in thread
From: Hemant Agrawal @ 2017-01-17 18:52 UTC (permalink / raw)
  To: dev
  Cc: thomas.monjalon, bruce.richardson, shreyansh.jain, john.mcnamara,
	ferruh.yigit, jerin.jacob

The patch series adds NXP’s QorIQ-Layerscape DPAA2 architecture based
fsl-mc bus driver and network SoC PMD. This version of the driver
supports the NXP LS208xA, LS204xA and LS108x families of network SoCs.

DPAA2, or Data Path Acceleration Architecture, is a hardware architecture
designed for high-speed network packet processing. For resource management
it uses a bus named ‘fsl-mc’, which is part of the Linux kernel staging
tree [2].

A brief description of the architecture is given below; a detailed
description is part of the documentation in the patches themselves.

DPAA2 contains a hardware component called the Management Complex (or MC),
which manages the DPAA2 hardware resources. The MC provides an object-based
abstraction through which software drivers use the DPAA2 hardware.

Some of the key objects are:
    - DPNI, which refers to the network interface object.
    - DPBP, which refers to the HW-based memory pool object.
    - DPIO, which refers to the processing context for accessing QBMAN.

Besides the MC, DPAA2 also includes a hardware-based queue and buffer
manager called QBMAN. Its prime responsibility is to give software/user-space
lockless access to the queues and buffers implemented in the hardware.

The patch series can be logically structured into the following sub-areas:
1. (Patch 0001) Enabling CRC in the armv8 core machine type
2. (Patch 0002) DPAA2 architecture overview document
3. (Patch 0003) Common dpaa2 hw accelerator drivers for QBMAN
4. (Patches 0004-0012) Introduction of the fsl-mc bus
5. (Patches 0013-0017) Introduction of the DPAA2 PMD, DPIO and mempool
6. (Patches 0018-0031) Support for the DPAA2 Ethernet device (ethdev)
7. (Patches 0032-0033) Additional functionality in the DPAA2 ethdev

The following design decisions were made during development:

1. DPAA2 implements a new bus called "fsl-mc" and some common accelerator
   drivers. These drivers will be shared with the dpaa2-based crypto drivers.
 - For this, the patch series from Shreyansh [1] has been used to create the
   bus handler.

2. DPAA2 implements the HW mempool offload with the DPBP object.
 - The new pool is selected through a compile-time option, using the pool
   name "dpaa2" (see the sketch after this list).

3. It maintains per-lcore DPIO objects and affines each DPIO instance to the
   processing threads accessing the QBMAN HW.
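
As a rough illustration only: a minimal sketch of how an application could
bind an mbuf pool to a hardware mempool handler through the generic mempool
ops API (this mirrors what rte_pktmbuf_pool_create() does internally; the
pool name "dpaa2" follows the text above, and the sizes and error handling
are illustrative, not driver code):

    #include <rte_lcore.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    static struct rte_mempool *
    create_hw_pktmbuf_pool(void)
    {
        struct rte_mempool *mp;

        /* Element size: mbuf header + headroom + 2048B of data room,
         * matching the default data room used by rte_pktmbuf_pool_init(). */
        mp = rte_mempool_create_empty("pkt_pool", 8192,
                sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM + 2048,
                256, sizeof(struct rte_pktmbuf_pool_private),
                rte_socket_id(), 0);
        if (mp == NULL)
            return NULL;

        /* Bind the pool to the hw-offloaded handler before populating. */
        if (rte_mempool_set_ops_byname(mp, "dpaa2", NULL) != 0) {
            rte_mempool_free(mp);
            return NULL;
        }

        rte_pktmbuf_pool_init(mp, NULL);  /* default data room size */
        if (rte_mempool_populate_default(mp) < 0) {
            rte_mempool_free(mp);
            return NULL;
        }
        rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
        return mp;
    }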

Prerequisites:
 - For running the PMD, NXP's SoC (board) and SDK (software/BSP) are required.
   Information about obtaining the relevant software is available in the docs
   that are part of the patches.
 - At present the series has limited support for Ethernet functions, but more
   functionality will be made available in a phased manner.
 - This PMD has been validated over the Bus Model [1] and/or the SoC
   Patchset [3].

Future Changes/Caveats:

1. The VFIO code for the fsl-mc bus is different from the eal-vfio code for
   the PCI bus. This needs to be re-worked to make re-use of the existing
   code possible.

2. Shared library support fails in case of a parallel build, due to the
   dependency of drivers/common, pool, etc. on other driver components. The
   dependency graph needs to be improved to support this.

Dependencies:

[1] http://dpdk.org/dev/patchwork/patch/19557/

References:

[2] https://www.kernel.org/doc/readme/drivers-staging-fsl-mc-README.txt
[3] http://dpdk.org/ml/archives/dev/2016-October/048949.html

---
v4:
* rebased over master (1feda4d8) and patches from Shreyansh [1] for Bus Arch.

v3:
* rebased over master (eac901ce2) and patches from Shreyansh [1] for Bus Arch.
* Fixed comment from John on Patch-0003 for documentation
* Removed Patch-0001 for rte_device in rte_eth_dev; Already upstreamed through
  another series

v2:
* separated the "fsl-mc" bus from the dpaa2 pmd driver - introduced drivers/bus
* separated the "dpaa2" hw mempool from dpaa2 pmd driver - introduced drivers/pool
* removed documentation warnings and added missing information.
* removed arm64 part specific code from driver
* changed rte_panic to errors
* reduced checkpatch warnings


Hemant Agrawal (33):
  mk/dpaa2: add the crc support to the machine type
  doc: add dpaa2 nic details
  drivers/common/dpaa2: adding qbman driver
  bus/fslmc: introducing fsl-mc bus driver
  bus/fslmc: introduce mc object functions
  bus/fslmc: add mc dpni object support
  bus/fslmc: add mc dpio object support
  bus/fslmc: add mc dpbp object support
  bus/fslmc: add mc dpseci object support
  eal/vfio: adding vfio utility functions in map file
  bus/fslmc: add vfio support
  bus/fslmc: scan for net and sec devices
  net/dpaa2: introducing NXP dpaa2 pmd driver
  bus/fslmc: add debug log message support
  drivers/common/dpaa2: dpio portal driver
  drivers/pool/dpaa2: adding hw offloaded mempool
  drivers/common/dpaa2: dpio routine to affine to crypto threads
  net/dpaa2: adding eth ops to dpaa2
  net/dpaa2: add rss flow distribution
  net/dpaa2: configure mac address at init
  net/dpaa2: attach the buffer pool to dpni
  net/dpaa2: add support for l3 and l4 checksum offload
  net/dpaa2: add support for promiscuous mode
  net/dpaa2: add mtu config support
  net/dpaa2: add packet rx and tx support
  net/dpaa2: rx packet parsing and packet type support
  net/dpaa2: link status update
  net/dpaa2: basic stats support
  net/dpaa2: enable stashing for LS2088A devices
  net/dpaa2: add support for non hw buffer pool packet transmit
  net/dpaa2: enabling the use of physical addresses
  bus/fslmc: add support for dmamap to ARM SMMU
  drivers/common/dpaa2: frame queue based dq storage alloc

 MAINTAINERS                                        |    8 +
 config/common_base                                 |   22 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc          |   28 +-
 doc/guides/nics/dpaa2.rst                          |  594 ++++++++
 doc/guides/nics/features/dpaa2.ini                 |   18 +
 doc/guides/nics/index.rst                          |    1 +
 doc/guides/rel_notes/release_17_02.rst             |   11 +
 drivers/Makefile                                   |    3 +
 drivers/bus/Makefile                               |   38 +
 drivers/bus/fslmc/Makefile                         |   75 +
 drivers/bus/fslmc/fslmc_bus.c                      |  149 ++
 drivers/bus/fslmc/fslmc_logs.h                     |   76 +
 drivers/bus/fslmc/fslmc_vfio.c                     |  629 +++++++++
 drivers/bus/fslmc/fslmc_vfio.h                     |   82 ++
 drivers/bus/fslmc/mc/dpbp.c                        |  230 +++
 drivers/bus/fslmc/mc/dpio.c                        |  272 ++++
 drivers/bus/fslmc/mc/dpni.c                        |  732 ++++++++++
 drivers/bus/fslmc/mc/dpseci.c                      |  527 +++++++
 drivers/bus/fslmc/mc/fsl_dpbp.h                    |  220 +++
 drivers/bus/fslmc/mc/fsl_dpbp_cmd.h                |   76 +
 drivers/bus/fslmc/mc/fsl_dpio.h                    |  275 ++++
 drivers/bus/fslmc/mc/fsl_dpio_cmd.h                |  114 ++
 drivers/bus/fslmc/mc/fsl_dpkg.h                    |  177 +++
 drivers/bus/fslmc/mc/fsl_dpni.h                    | 1210 ++++++++++++++++
 drivers/bus/fslmc/mc/fsl_dpni_cmd.h                |  327 +++++
 drivers/bus/fslmc/mc/fsl_dpseci.h                  |  661 +++++++++
 drivers/bus/fslmc/mc/fsl_dpseci_cmd.h              |  248 ++++
 drivers/bus/fslmc/mc/fsl_mc_cmd.h                  |  231 +++
 drivers/bus/fslmc/mc/fsl_mc_sys.h                  |   98 ++
 drivers/bus/fslmc/mc/fsl_net.h                     |  480 +++++++
 drivers/bus/fslmc/mc/mc_sys.c                      |  107 ++
 drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c           |  137 ++
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c           |  441 ++++++
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.h           |   70 +
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h            |  247 ++++
 drivers/bus/fslmc/rte_fslmc.h                      |  147 ++
 drivers/bus/fslmc/rte_pmd_fslmcbus_version.map     |   62 +
 drivers/common/Makefile                            |   45 +
 drivers/common/dpaa2/Makefile                      |   36 +
 drivers/common/dpaa2/qbman/Makefile                |   58 +
 drivers/common/dpaa2/qbman/include/compat.h        |  403 ++++++
 .../common/dpaa2/qbman/include/fsl_qbman_base.h    |  157 ++
 .../common/dpaa2/qbman/include/fsl_qbman_portal.h  | 1090 ++++++++++++++
 drivers/common/dpaa2/qbman/qbman_portal.c          | 1492 ++++++++++++++++++++
 drivers/common/dpaa2/qbman/qbman_portal.h          |  274 ++++
 drivers/common/dpaa2/qbman/qbman_private.h         |  167 +++
 drivers/common/dpaa2/qbman/qbman_sys.h             |  382 +++++
 drivers/common/dpaa2/qbman/qbman_sys_decl.h        |   70 +
 .../dpaa2/qbman/rte_pmd_dpaa2_qbman_version.map    |   27 +
 drivers/net/Makefile                               |    2 +-
 drivers/net/dpaa2/Makefile                         |   72 +
 drivers/net/dpaa2/base/dpaa2_hw_dpni.c             |  344 +++++
 drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h       |  257 ++++
 drivers/net/dpaa2/dpaa2_ethdev.c                   | 1053 ++++++++++++++
 drivers/net/dpaa2/dpaa2_ethdev.h                   |   83 ++
 drivers/net/dpaa2/dpaa2_rxtx.c                     |  421 ++++++
 drivers/net/dpaa2/rte_pmd_dpaa2_version.map        |    4 +
 drivers/pool/Makefile                              |   38 +
 drivers/pool/dpaa2/Makefile                        |   67 +
 drivers/pool/dpaa2/dpaa2_hw_mempool.c              |  352 +++++
 drivers/pool/dpaa2/dpaa2_hw_mempool.h              |   95 ++
 drivers/pool/dpaa2/rte_pmd_dpaa2_pool_version.map  |    8 +
 lib/librte_eal/bsdapp/eal/rte_eal_version.map      |    3 +
 lib/librte_eal/linuxapp/eal/rte_eal_version.map    |    3 +
 mk/machine/dpaa2/rte.vars.mk                       |    5 +-
 mk/rte.app.mk                                      |    6 +
 66 files changed, 15833 insertions(+), 4 deletions(-)
 create mode 100644 doc/guides/nics/dpaa2.rst
 create mode 100644 doc/guides/nics/features/dpaa2.ini
 create mode 100644 drivers/bus/Makefile
 create mode 100644 drivers/bus/fslmc/Makefile
 create mode 100644 drivers/bus/fslmc/fslmc_bus.c
 create mode 100644 drivers/bus/fslmc/fslmc_logs.h
 create mode 100644 drivers/bus/fslmc/fslmc_vfio.c
 create mode 100644 drivers/bus/fslmc/fslmc_vfio.h
 create mode 100644 drivers/bus/fslmc/mc/dpbp.c
 create mode 100644 drivers/bus/fslmc/mc/dpio.c
 create mode 100644 drivers/bus/fslmc/mc/dpni.c
 create mode 100644 drivers/bus/fslmc/mc/dpseci.c
 create mode 100644 drivers/bus/fslmc/mc/fsl_dpbp.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_dpio.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_dpio_cmd.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_dpkg.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_dpni.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_dpni_cmd.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_dpseci.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_dpseci_cmd.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_mc_cmd.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_mc_sys.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_net.h
 create mode 100644 drivers/bus/fslmc/mc/mc_sys.c
 create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
 create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
 create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
 create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
 create mode 100644 drivers/bus/fslmc/rte_fslmc.h
 create mode 100644 drivers/bus/fslmc/rte_pmd_fslmcbus_version.map
 create mode 100644 drivers/common/Makefile
 create mode 100644 drivers/common/dpaa2/Makefile
 create mode 100644 drivers/common/dpaa2/qbman/Makefile
 create mode 100644 drivers/common/dpaa2/qbman/include/compat.h
 create mode 100644 drivers/common/dpaa2/qbman/include/fsl_qbman_base.h
 create mode 100644 drivers/common/dpaa2/qbman/include/fsl_qbman_portal.h
 create mode 100644 drivers/common/dpaa2/qbman/qbman_portal.c
 create mode 100644 drivers/common/dpaa2/qbman/qbman_portal.h
 create mode 100644 drivers/common/dpaa2/qbman/qbman_private.h
 create mode 100644 drivers/common/dpaa2/qbman/qbman_sys.h
 create mode 100644 drivers/common/dpaa2/qbman/qbman_sys_decl.h
 create mode 100644 drivers/common/dpaa2/qbman/rte_pmd_dpaa2_qbman_version.map
 create mode 100644 drivers/net/dpaa2/Makefile
 create mode 100644 drivers/net/dpaa2/base/dpaa2_hw_dpni.c
 create mode 100644 drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
 create mode 100644 drivers/net/dpaa2/dpaa2_ethdev.c
 create mode 100644 drivers/net/dpaa2/dpaa2_ethdev.h
 create mode 100644 drivers/net/dpaa2/dpaa2_rxtx.c
 create mode 100644 drivers/net/dpaa2/rte_pmd_dpaa2_version.map
 create mode 100644 drivers/pool/Makefile
 create mode 100644 drivers/pool/dpaa2/Makefile
 create mode 100644 drivers/pool/dpaa2/dpaa2_hw_mempool.c
 create mode 100644 drivers/pool/dpaa2/dpaa2_hw_mempool.h
 create mode 100644 drivers/pool/dpaa2/rte_pmd_dpaa2_pool_version.map

-- 
1.9.1

