DPDK patches and discussions
 help / color / mirror / Atom feed
From: "Ananyev, Konstantin" <konstantin.ananyev@intel.com>
To: "vadim.suraev@gmail.com" <vadim.suraev@gmail.com>,
	"dev@dpdk.org" <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH v2] rte_mbuf: mbuf bulk alloc/free functions added + unittest
Date: Tue, 17 Mar 2015 23:46:50 +0000
Message-ID: <2601191342CEEE43887BDE71AB977258213F6F10@irsmsx105.ger.corp.intel.com> (raw)
In-Reply-To: <1426628169-1735-1-git-send-email-vadim.suraev@gmail.com>

Hi Vadim,

> -----Original Message-----
> From: vadim.suraev@gmail.com [mailto:vadim.suraev@gmail.com]
> Sent: Tuesday, March 17, 2015 9:36 PM
> To: dev@dpdk.org
> Cc: olivier.matz@6wind.com; stephen@networkplumber.org; Ananyev, Konstantin; vadim.suraev@gmail.com
> Subject: [PATCH v2] rte_mbuf: mbuf bulk alloc/free functions added + unittest
> 
> From: "vadim.suraev@gmail.com" <vadim.suraev@gmail.com>
> 
> This patch adds mbuf bulk allocation/freeing functions and unittest
> 
> Signed-off-by: Vadim Suraev
> <vadim.suraev@gmail.com>
> ---
> New in v2:
>     - function rte_pktmbuf_alloc_bulk added
>     - function rte_pktmbuf_bulk_free added
>     - function rte_pktmbuf_free_chain added
>     - applied reviewers' comments
> 
>  app/test/test_mbuf.c       |   94 +++++++++++++++++++++++++++++++++++++++++++-
>  lib/librte_mbuf/rte_mbuf.h |   89 +++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 182 insertions(+), 1 deletion(-)
> 
> diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
> index 1ff66cb..b20c6a4 100644
> --- a/app/test/test_mbuf.c
> +++ b/app/test/test_mbuf.c
> @@ -77,6 +77,7 @@
>  #define REFCNT_RING_SIZE        (REFCNT_MBUF_NUM * REFCNT_MAX_REF)
> 
>  #define MAKE_STRING(x)          # x
> +#define MBUF_POOL_LOCAL_CACHE_SIZE 32
> 
>  static struct rte_mempool *pktmbuf_pool = NULL;
> 
> @@ -405,6 +406,84 @@ test_pktmbuf_pool(void)
>  	return ret;
>  }
> 
> +/* test pktmbuf bulk allocation and freeing
> +*/
> +static int
> +test_pktmbuf_pool_bulk(void)
> +{
> +	unsigned i;
> +	/* size of mempool - size of local cache, otherwise may fail */
> +	unsigned mbufs_to_allocate = NB_MBUF - MBUF_POOL_LOCAL_CACHE_SIZE;
> +	struct rte_mbuf *m[mbufs_to_allocate];
> +	int ret = 0;
> +	unsigned mbuf_count_before_allocation = rte_mempool_count(pktmbuf_pool);
> +
> +	for (i = 0; i < mbufs_to_allocate; i++)
> +		m[i] = NULL;
> +	/* alloc NB_MBUF-MBUF_POOL_LOCAL_CACHE_SIZE mbufs */
> +	ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, m, mbufs_to_allocate);
> +	if (ret) {
> +		printf("cannot allocate %d mbufs bulk mempool_cnt=%d ret=%d\n",
> +			mbufs_to_allocate,
> +			rte_mempool_count(pktmbuf_pool),
> +			ret);
> +		return -1;
> +	}
> +	if ((rte_mempool_count(pktmbuf_pool) + mbufs_to_allocate) !=
> +	    mbuf_count_before_allocation) {
> +		printf("mempool count %d + allocated %d != initial %d\n",
> +			rte_mempool_count(pktmbuf_pool),
> +			mbufs_to_allocate,
> +			mbuf_count_before_allocation);
> +		return -1;
> +	}
> +	/* free them */
> +	rte_pktmbuf_bulk_free(m, mbufs_to_allocate);
> +
> +	if (rte_mempool_count(pktmbuf_pool)  != mbuf_count_before_allocation) {
> +		printf("mempool count %d != initial %d\n",
> +			rte_mempool_count(pktmbuf_pool),
> +			mbuf_count_before_allocation);
> +		return -1;
> +	}
> +	for (i = 0; i < mbufs_to_allocate; i++)
> +		m[i] = NULL;
> +
> +	/* alloc NB_MBUF-MBUF_POOL_LOCAL_CACHE_SIZE mbufs */
> +	ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, m, mbufs_to_allocate);
> +	if (ret) {
> +		printf("cannot allocate %d mbufs bulk mempool_cnt=%d ret=%d\n",
> +			mbufs_to_allocate,
> +			rte_mempool_count(pktmbuf_pool),
> +			ret);
> +		return -1;
> +	}
> +	if ((rte_mempool_count(pktmbuf_pool) + mbufs_to_allocate) !=
> +	    mbuf_count_before_allocation) {
> +		printf("mempool count %d + allocated %d != initial %d\n",
> +			rte_mempool_count(pktmbuf_pool),
> +					  mbufs_to_allocate,
> +					  mbuf_count_before_allocation);
> +		return -1;
> +	}
> +
> +	/* chain it */
> +	for (i = 0; i < mbufs_to_allocate - 1; i++) {
> +		m[i]->next = m[i + 1];
> +		m[0]->nb_segs++;
> +	}
> +	/* free them */
> +	rte_pktmbuf_free_chain(m[0]);
> +
> +	if (rte_mempool_count(pktmbuf_pool)  != mbuf_count_before_allocation) {
> +		printf("mempool count %d != initial %d\n",
> +			rte_mempool_count(pktmbuf_pool),
> +					  mbuf_count_before_allocation);
> +		return -1;
> +	}
> +	return ret;
> +}
> +
>  /*
>   * test that the pointer to the data on a packet mbuf is set properly
>   */
> @@ -766,7 +845,8 @@ test_mbuf(void)
>  	if (pktmbuf_pool == NULL) {
>  		pktmbuf_pool =
>  			rte_mempool_create("test_pktmbuf_pool", NB_MBUF,
> -					   MBUF_SIZE, 32,
> +					   MBUF_SIZE,
> +					   MBUF_POOL_LOCAL_CACHE_SIZE,
>  					   sizeof(struct rte_pktmbuf_pool_private),
>  					   rte_pktmbuf_pool_init, NULL,
>  					   rte_pktmbuf_init, NULL,
> @@ -790,6 +870,18 @@ test_mbuf(void)
>  		return -1;
>  	}
> 
> +	/* test bulk allocation and freeing */
> +	if (test_pktmbuf_pool_bulk() < 0) {
> +		printf("test_pktmbuf_pool_bulk() failed\n");
> +		return -1;
> +	}
> +
> +	/* once again to ensure all mbufs were freed */
> +	if (test_pktmbuf_pool_bulk() < 0) {
> +		printf("test_pktmbuf_pool_bulk() failed\n");
> +		return -1;
> +	}
> +
>  	/* test that the pointer to the data on a packet mbuf is set properly */
>  	if (test_pktmbuf_pool_ptr() < 0) {
>  		printf("test_pktmbuf_pool_ptr() failed\n");
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> index 17ba791..995237d 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -825,6 +825,95 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
>  }
> 
>  /**
> + * Allocate a bulk of mbufs, initiate refcnt and resets
> + *
> + * @param pool
> + *    memory pool to allocate from
> + * @param mbufs
> + *    Array of pointers to mbuf
> + * @param count
> + *    Array size
> + */
> +static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
> +					 struct rte_mbuf **mbufs,
> +					 unsigned count)
> +{
> +	unsigned idx;
> +	int rc = 0;
> +
> +	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
> +	if (unlikely(rc))
> +		return rc;
> +
> +	for (idx = 0; idx < count; idx++) {
> +		RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
> +		rte_mbuf_refcnt_set(mbufs[idx], 1);
> +		rte_pktmbuf_reset(mbufs[idx]);
> +	}
> +	return rc;
> +}
> +
> +/**
> + * Free a bulk of mbufs into its original mempool.
> + * This function assumes refcnt equals 1
> + * as well as the freed mbufs are direct

I think you forgot to mention in comments one more requirement for that function:
all mbufs have to be from the same mempool.

> + *
> + * @param mbufs
> + *    Array of pointers to mbuf
> + * @param count
> + *    Array size
> + */
> +static inline void rte_pktmbuf_bulk_free(struct rte_mbuf **mbufs,
> +					 unsigned count)
> +{
> +	unsigned idx;
> +
> +	RTE_MBUF_ASSERT(count > 0);
> +
> +	for (idx = 0; idx < count; idx++) {
> +		rte_mbuf_refcnt_update(mbufs[idx], -1);

You can do just:
rte_mbuf_refcnt_set(mbufs[idx], 0);
here and move your assert above it.
Something like:
RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 1);
rte_mbuf_refcnt_set(mbufs[idx], 0);

Also probably would be a good thing to add one more assert here,
something like:
RTE_MBUF_ASSERT(mbufs[idx]->pool == mbufs[0]->pool);

> +		RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
> +	}
> +	rte_mempool_put_bulk(mbufs[0]->pool, (void **)mbufs, count);
> +}
> +
> +/**
> + * Free chained (scattered) mbufs into its original mempool(s).
> + *
> + * @param head
> + *    The head of mbufs to be freed chain. Must not be NULL
> + */
> +static inline void rte_pktmbuf_free_chain(struct rte_mbuf *head)
> +{
> +	struct rte_mbuf *mbufs[head->nb_segs];
> +	unsigned mbufs_count = 0;
> +	struct rte_mbuf *next;
> +
> +	while (head) {
> +		next = head->next;
> +		head->next = NULL;

Shouldn't the line above be inside if (head != NULL) {...} block?

> +		head = __rte_pktmbuf_prefree_seg(head);
> +		if (likely(head != NULL)) {
> +			RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(head) == 0);

I don't think there is any use of the assert above.
If prefree_seg returns non-NULL value, it sets refcnt to 0 for that mbuf.

> +			if (likely((!mbufs_count) ||
> +				   (head->pool == mbufs[0]->pool)))
> +				mbufs[mbufs_count++] = head;
> +			else {
> +				rte_mempool_put_bulk(mbufs[0]->pool,
> +						     (void **)mbufs,
> +						     mbufs_count);
> +				mbufs_count = 0;
> +			}
> +		}
> +		head = next;
> +	}
> +	if (mbufs_count > 0)
> +		rte_mempool_put_bulk(mbufs[0]->pool,
> +				     (void **)mbufs,
> +				     mbufs_count);
> +}
> +
> +/**
>   * Creates a "clone" of the given packet mbuf.
>   *
>   * Walks through all segments of the given packet mbuf, and for each of them:
> --
> 1.7.9.5

  reply	other threads:[~2015-03-17 23:46 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-03-17 21:36 vadim.suraev
2015-03-17 23:46 ` Ananyev, Konstantin [this message]
2015-03-18  5:19   ` Vadim Suraev
     [not found]     ` <2601191342CEEE43887BDE71AB977258213F7053@irsmsx105.ger.corp.intel.com>
2015-03-18  9:56       ` Ananyev, Konstantin
2015-03-18 10:41         ` Vadim Suraev
     [not found]           ` <2601191342CEEE43887BDE71AB977258213F7136@irsmsx105.ger.corp.intel.com>
2015-03-18 15:13             ` Ananyev, Konstantin
2015-03-19  8:13               ` Olivier MATZ
2015-03-19 10:47                 ` Ananyev, Konstantin
2015-03-19 10:54                   ` Olivier MATZ
2015-03-18 20:21 vadim.suraev
2015-03-18 20:58 ` Neil Horman
2015-03-19  8:41   ` Olivier MATZ
2015-03-19 10:06     ` Ananyev, Konstantin
2015-03-19 13:16     ` Neil Horman
2015-03-23 16:44       ` Olivier MATZ
2015-03-23 17:31         ` Vadim Suraev
2015-03-23 23:48           ` Ananyev, Konstantin
2015-03-24  7:53             ` Vadim Suraev
     [not found]               ` <2601191342CEEE43887BDE71AB977258214071C0@irsmsx105.ger.corp.intel.com>
2015-03-24 11:00                 ` Ananyev, Konstantin
2015-03-23 18:45         ` Neil Horman
2015-03-30 19:04   ` Vadim Suraev
2015-03-30 20:15     ` Neil Horman

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=2601191342CEEE43887BDE71AB977258213F6F10@irsmsx105.ger.corp.intel.com \
    --to=konstantin.ananyev@intel.com \
    --cc=dev@dpdk.org \
    --cc=vadim.suraev@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

DPDK patches and discussions

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ https://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git