From: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
To: olivier.matz@6wind.com
Cc: ferruh.yigit@intel.com, dev@dpdk.org, Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
Date: Wed, 18 Jan 2017 15:54:39 +0700
Message-Id: <1484729679-3092-1-git-send-email-s.vyazmitinov@brain4net.com>
Subject: [dpdk-dev] [PATCH v3] kni: use bulk functions to allocate and free mbufs

Optimized kni_allocate_mbufs() and kni_free_mbufs() by using the mbuf bulk
functions. This can improve performance by more than a factor of two.

Signed-off-by: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
---
v2:
* CSG fixes.
v3:
* Fixed handling of mbufs coming from different mempools in the buffer list.
* Fixed handling of the rte_pktmbuf_alloc_bulk() return value in
  kni_allocate_mbufs().
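For illustration only (not part of the patch): a minimal sketch of the caller-side pattern the bulk API enables, assuming a pktmbuf pool created elsewhere. The names BURST, pool and refill_and_drain() are invented for this sketch; rte_pktmbuf_free_bulk() is the helper introduced by this patch, while rte_pktmbuf_alloc_bulk() already exists in rte_mbuf.h. The claimed speedup comes from replacing per-mbuf mempool get/put calls with one bulk operation per burst. A second sketch after the diff covers the mixed-mempool case addressed in v3.

#include <rte_mbuf.h>

#define BURST 32

/* One bulk get from the mempool instead of BURST rte_pktmbuf_alloc()
 * calls, and one bulk put instead of BURST rte_pktmbuf_free() calls. */
static void
refill_and_drain(struct rte_mempool *pool)
{
	struct rte_mbuf *pkts[BURST];

	if (rte_pktmbuf_alloc_bulk(pool, pkts, BURST) != 0)
		return;	/* pool exhausted, retry later */

	/* ... hand the mbufs to the kernel side, process them, ... */

	rte_pktmbuf_free_bulk(pkts, BURST);
}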
---
 lib/librte_kni/rte_kni.c      | 46 +++++++++++++++++++---------------------
 lib/librte_kni/rte_kni_fifo.h | 18 ++++++++++++++++
 lib/librte_mbuf/rte_mbuf.h    | 49 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 89 insertions(+), 24 deletions(-)

diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index a80cefd..ad746ad 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -590,22 +590,21 @@ rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
 static void
 kni_free_mbufs(struct rte_kni *kni)
 {
-	int i, ret;
+	unsigned int freeing;
 	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
 
-	ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
-	if (likely(ret > 0)) {
-		for (i = 0; i < ret; i++)
-			rte_pktmbuf_free(pkts[i]);
+	freeing = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
+	if (likely(freeing > 0)) {
+		rte_pktmbuf_free_bulk(pkts, freeing);
 	}
 }
 
 static void
 kni_allocate_mbufs(struct rte_kni *kni)
 {
-	int i, ret;
-	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
-	void *phys[MAX_MBUF_BURST_NUM];
+	unsigned int count, put;
+	struct rte_mbuf *pkts[KNI_FIFO_COUNT_MAX];
+	void *phys[KNI_FIFO_COUNT_MAX];
 
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
 			 offsetof(struct rte_kni_mbuf, pool));
@@ -628,28 +627,27 @@ kni_allocate_mbufs(struct rte_kni *kni)
 		return;
 	}
 
-	for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
-		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
-		if (unlikely(pkts[i] == NULL)) {
-			/* Out of memory */
-			RTE_LOG(ERR, KNI, "Out of memory\n");
-			break;
-		}
-		phys[i] = va2pa(pkts[i]);
-	}
+	/* Calculate alloc queue free space */
+	count = kni_fifo_free_count(kni->alloc_q);
 
-	/* No pkt mbuf alocated */
-	if (i <= 0)
+	/* Get buffers from mempool */
+	if (rte_pktmbuf_alloc_bulk(kni->pktmbuf_pool, pkts, count) != 0) {
+		RTE_LOG(ERR, KNI, "Can`t allocate %d mbufs\n", count);
 		return;
+	}
 
-	ret = kni_fifo_put(kni->alloc_q, phys, i);
+	for (unsigned int i = 0; i < count; i++)
+		phys[i] = va2pa(pkts[i]);
 
-	/* Check if any mbufs not put into alloc_q, and then free them */
-	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
-		int j;
+	/* Put buffers into alloc queue */
+	put = kni_fifo_put(kni->alloc_q, (void **)phys, count);
 
-		for (j = ret; j < i; j++)
+	/* Check if any mbufs not put into alloc_q, and then free them */
+	if (unlikely(put < count)) {
+		for (unsigned int j = put; j < count; j++) {
+			RTE_LOG(ERR, KNI, "Free allocated buffer\n");
 			rte_pktmbuf_free(pkts[j]);
+		}
 	}
 }
 
diff --git a/lib/librte_kni/rte_kni_fifo.h b/lib/librte_kni/rte_kni_fifo.h
index 8cb8587..361ddb0 100644
--- a/lib/librte_kni/rte_kni_fifo.h
+++ b/lib/librte_kni/rte_kni_fifo.h
@@ -91,3 +91,21 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
 	fifo->read = new_read;
 	return i;
 }
+
+/**
+ * Get the num of elements in the fifo
+ */
+static inline unsigned
+kni_fifo_count(struct rte_kni_fifo *fifo)
+{
+	return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+}
+
+/**
+ * Get the num of available elements in the fifo
+ */
+static inline unsigned
+kni_fifo_free_count(struct rte_kni_fifo *fifo)
+{
+	return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+}
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 4476d75..69d314f 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -306,6 +306,9 @@ extern "C" {
 /** Alignment constraint of mbuf private area. */
 #define RTE_MBUF_PRIV_ALIGN 8
 
+/** Maximum number of mbufs freed in bulk. */
+#define RTE_MBUF_BULK_FREE 64
+
 /**
  * Get the name of a RX offload flag
  *
@@ -1261,6 +1264,52 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
 }
 
 /**
+ * Free n packets mbuf back into its original mempool.
+ *
+ * Free each mbuf, and all its segments in case of chained buffers. Each
+ * segment is added back into its original mempool.
+ *
+ * @param mp
+ *   The packets mempool.
+ * @param mbufs
+ *   The packets mbufs array to be freed.
+ * @param n
+ *   Number of packets.
+ */
+static inline void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs,
+		unsigned int n)
+{
+	void *tofree[RTE_MBUF_BULK_FREE];
+	struct rte_mempool *mp = NULL;
+	unsigned int i, count = 0;
+
+	for (i = 0; i < n; i++) {
+		struct rte_mbuf *m, *m_next;
+
+		for (m = mbufs[i]; m; m = m_next) {
+			m_next = m->next;
+
+			if (count > 0 &&
+					(unlikely(m->pool != mp ||
+					count == RTE_MBUF_BULK_FREE))) {
+				rte_mempool_put_bulk(mp, tofree, count);
+				count = 0;
+			}
+
+			mp = m->pool;
+
+			if (likely(__rte_pktmbuf_prefree_seg(m) != NULL)) {
+				m->next = NULL;
+				tofree[count++] = m;
+			}
+		}
+	}
+
+	if (likely(count > 0))
+		rte_mempool_put_bulk(mp, tofree, count);
+}
+
+/**
  * Creates a "clone" of the given packet mbuf.
  *
  * Walks through all segments of the given packet mbuf, and for each of them:
-- 
2.7.4
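For reference, a small usage sketch (not part of the patch) of the new rte_pktmbuf_free_bulk() with mbufs coming from different mempools, the case the v3 revision addresses: the helper flushes its tofree[] batch whenever m->pool changes or RTE_MBUF_BULK_FREE entries accumulate, so every segment is returned to the pool recorded in m->pool. The names pool_a, pool_b and free_mixed_burst() are invented for this sketch, and both pools are assumed to be created elsewhere.

#include <rte_mbuf.h>

/* Frees a burst that interleaves mbufs from two pools. NULL entries
 * (failed allocations) are simply skipped by the helper's per-packet
 * loop, so no explicit check is needed before the bulk free. */
static void
free_mixed_burst(struct rte_mempool *pool_a, struct rte_mempool *pool_b)
{
	struct rte_mbuf *pkts[4];

	pkts[0] = rte_pktmbuf_alloc(pool_a);
	pkts[1] = rte_pktmbuf_alloc(pool_b);
	pkts[2] = rte_pktmbuf_alloc(pool_a);
	pkts[3] = rte_pktmbuf_alloc(pool_b);

	/* Segments are grouped per pool internally and returned in bulk. */
	rte_pktmbuf_free_bulk(pkts, 4);
}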