From: <jerinj@marvell.com>
To: <dev@dpdk.org>, Jerin Jacob <jerinj@marvell.com>,
Nithin Dabilpuram <ndabilpuram@marvell.com>,
Vamsi Attunuru <vattunuru@marvell.com>
Cc: Olivier Matz <olivier.matz@6wind.com>
Subject: [dpdk-dev] [PATCH v2 21/27] mempool/octeontx2: add mempool alloc op
Date: Sat, 1 Jun 2019 07:18:59 +0530
Message-ID: <20190601014905.45531-22-jerinj@marvell.com>
In-Reply-To: <20190601014905.45531-1-jerinj@marvell.com>
From: Jerin Jacob <jerinj@marvell.com>
The DPDK mempool allocation reserves a single HW AURA
and POOL in 1:1 map mode. Upon reservation, SW performs the slow path
operations such as allocating stack memory for DMA and
programming the relevant HW configurations on the respective HW blocks.
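
For context, a minimal sketch (not part of this patch) of how an
application would bind a mempool to these ops using the standard
rte_mempool APIs; the pool name, element sizes and the NULL pool_config
are illustrative only:

  #include <rte_lcore.h>
  #include <rte_mempool.h>

  struct rte_mempool *mp;

  /* Create an empty mempool; sizes here are illustrative */
  mp = rte_mempool_create_empty("example_pool", 8192, 2048,
                                256, 0, rte_socket_id(), 0);
  if (mp == NULL)
          return -ENOMEM;

  /* Select the ops registered below; pool_config may instead point
   * to a struct npa_aura_s to override the default aura settings.
   */
  if (rte_mempool_set_ops_byname(mp, "octeontx2_npa", NULL) < 0)
          return -EINVAL;

  /* Populating the pool invokes the alloc op added by this patch */
  if (rte_mempool_populate_default(mp) < 0)
          return -ENOMEM;
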
Cc: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
---
drivers/mempool/octeontx2/Makefile | 1 +
drivers/mempool/octeontx2/meson.build | 3 +-
drivers/mempool/octeontx2/otx2_mempool_ops.c | 246 +++++++++++++++++++
3 files changed, 249 insertions(+), 1 deletion(-)
create mode 100644 drivers/mempool/octeontx2/otx2_mempool_ops.c
diff --git a/drivers/mempool/octeontx2/Makefile b/drivers/mempool/octeontx2/Makefile
index b86d469f4..b3568443e 100644
--- a/drivers/mempool/octeontx2/Makefile
+++ b/drivers/mempool/octeontx2/Makefile
@@ -28,6 +28,7 @@ LIBABIVER := 1
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX2_MEMPOOL) += \
+ otx2_mempool_ops.c \
otx2_mempool.c \
otx2_mempool_irq.c \
otx2_mempool_debug.c
diff --git a/drivers/mempool/octeontx2/meson.build b/drivers/mempool/octeontx2/meson.build
index ab306b729..9fde40f0e 100644
--- a/drivers/mempool/octeontx2/meson.build
+++ b/drivers/mempool/octeontx2/meson.build
@@ -2,7 +2,8 @@
# Copyright(C) 2019 Marvell International Ltd.
#
-sources = files('otx2_mempool.c',
+sources = files('otx2_mempool_ops.c',
+ 'otx2_mempool.c',
'otx2_mempool_irq.c',
'otx2_mempool_debug.c'
)
diff --git a/drivers/mempool/octeontx2/otx2_mempool_ops.c b/drivers/mempool/octeontx2/otx2_mempool_ops.c
new file mode 100644
index 000000000..0e7b7a77c
--- /dev/null
+++ b/drivers/mempool/octeontx2/otx2_mempool_ops.c
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_mempool.h>
+#include <rte_vect.h>
+
+#include "otx2_mempool.h"
+
+static int
+npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
+ struct npa_aura_s *aura, struct npa_pool_s *pool)
+{
+ struct npa_aq_enq_req *aura_init_req, *pool_init_req;
+ struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ int rc, off;
+
+ aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+
+ aura_init_req->aura_id = aura_id;
+ aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_init_req->op = NPA_AQ_INSTOP_INIT;
+ memcpy(&aura_init_req->aura, aura, sizeof(*aura));
+
+ pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+
+ pool_init_req->aura_id = aura_id;
+ pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_init_req->op = NPA_AQ_INSTOP_INIT;
+ memcpy(&pool_init_req->pool, pool, sizeof(*pool));
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0)
+ return rc;
+
+ off = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ aura_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+ off = mbox->rx_start + aura_init_rsp->hdr.next_msgoff;
+ pool_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+
+ if (rc == 2 && aura_init_rsp->hdr.rc == 0 && pool_init_rsp->hdr.rc == 0)
+ return 0;
+ else
+ return NPA_LF_ERR_AURA_POOL_INIT;
+}
+
+static inline char*
+npa_lf_stack_memzone_name(struct otx2_npa_lf *lf, int pool_id, char *name)
+{
+ snprintf(name, RTE_MEMZONE_NAMESIZE, "otx2_npa_stack_%x_%d",
+ lf->pf_func, pool_id);
+
+ return name;
+}
+
+static inline const struct rte_memzone *
+npa_lf_stack_dma_alloc(struct otx2_npa_lf *lf, char *name,
+ int pool_id, size_t size)
+{
+ return rte_memzone_reserve_aligned(
+ npa_lf_stack_memzone_name(lf, pool_id, name), size, 0,
+ RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
+}
+
+static inline int
+bitmap_ctzll(uint64_t slab)
+{
+ if (slab == 0)
+ return 0;
+
+ return __builtin_ctzll(slab);
+}
+
+static int
+npa_lf_aura_pool_pair_alloc(struct otx2_npa_lf *lf, const uint32_t block_size,
+ const uint32_t block_count, struct npa_aura_s *aura,
+ struct npa_pool_s *pool, uint64_t *aura_handle)
+{
+ int rc, aura_id, pool_id, stack_size, alloc_size;
+ char name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ uint64_t slab;
+ uint32_t pos;
+
+ /* Sanity check */
+ if (!lf || !block_size || !block_count ||
+ !pool || !aura || !aura_handle)
+ return NPA_LF_ERR_PARAM;
+
+ /* Block size should be cache line aligned and in range of 128B-128KB */
+ if (block_size % OTX2_ALIGN || block_size < 128 ||
+ block_size > 128 * 1024)
+ return NPA_LF_ERR_INVALID_BLOCK_SZ;
+
+ pos = slab = 0;
+ /* Scan from the beginning */
+ __rte_bitmap_scan_init(lf->npa_bmp);
+ /* Scan bitmap to get the free pool */
+ rc = rte_bitmap_scan(lf->npa_bmp, &pos, &slab);
+ /* Empty bitmap */
+ if (rc == 0) {
+ otx2_err("Mempools exhausted, 'max_pools' devargs to increase");
+ return -ERANGE;
+ }
+
+ /* Get aura_id from resource bitmap */
+ aura_id = pos + bitmap_ctzll(slab);
+ /* Mark pool as reserved */
+ rte_bitmap_clear(lf->npa_bmp, aura_id);
+
+ /* Each aura is configured with its own pool (aura-pool pair) */
+ pool_id = aura_id;
+ rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools || aura_id >=
+ (int)BIT_ULL(6 + lf->aura_sz)) ? NPA_LF_ERR_AURA_ID_ALLOC : 0;
+ if (rc)
+ goto exit;
+
+ /* Allocate stack memory */
+ stack_size = (block_count + lf->stack_pg_ptrs - 1) / lf->stack_pg_ptrs;
+ alloc_size = stack_size * lf->stack_pg_bytes;
+
+ mz = npa_lf_stack_dma_alloc(lf, name, pool_id, alloc_size);
+ if (mz == NULL) {
+ rc = -ENOMEM;
+ goto aura_res_put;
+ }
+
+ /* Update aura fields */
+ aura->pool_addr = pool_id;/* AF will translate to associated poolctx */
+ aura->ena = 1;
+ aura->shift = __builtin_clz(block_count) - 8;
+ aura->limit = block_count;
+ aura->pool_caching = 1;
+ aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+ /* Many to one reduction */
+ aura->err_qint_idx = aura_id % lf->qints;
+
+ /* Update pool fields */
+ pool->stack_base = mz->iova;
+ pool->ena = 1;
+ pool->buf_size = block_size / OTX2_ALIGN;
+ pool->stack_max_pages = stack_size;
+ pool->shift = __builtin_clz(block_count) - 8;
+ pool->ptr_start = 0;
+ pool->ptr_end = ~0;
+ pool->stack_caching = 1;
+ pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
+ pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
+ pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);
+
+ /* Many to one reduction */
+ pool->err_qint_idx = pool_id % lf->qints;
+
+ /* Issue AURA_INIT and POOL_INIT op */
+ rc = npa_lf_aura_pool_init(lf->mbox, aura_id, aura, pool);
+ if (rc)
+ goto stack_mem_free;
+
+ *aura_handle = npa_lf_aura_handle_gen(aura_id, lf->base);
+
+ /* Update aura count */
+ npa_lf_aura_op_cnt_set(*aura_handle, 0, block_count);
+ /* Read it back to make sure aura count is updated */
+ npa_lf_aura_op_cnt_get(*aura_handle);
+
+ return 0;
+
+stack_mem_free:
+ rte_memzone_free(mz);
+aura_res_put:
+ rte_bitmap_set(lf->npa_bmp, aura_id);
+exit:
+ return rc;
+}
+
+static int
+otx2_npa_alloc(struct rte_mempool *mp)
+{
+ uint32_t block_size, block_count;
+ struct otx2_npa_lf *lf;
+ struct npa_aura_s aura;
+ struct npa_pool_s pool;
+ uint64_t aura_handle;
+ int rc;
+
+ lf = otx2_npa_lf_obj_get();
+ if (lf == NULL) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ block_size = mp->elt_size + mp->header_size + mp->trailer_size;
+ block_count = mp->size;
+
+ if (block_size % OTX2_ALIGN != 0) {
+ otx2_err("Block size should be multiple of 128B");
+ rc = -ERANGE;
+ goto error;
+ }
+
+ memset(&aura, 0, sizeof(struct npa_aura_s));
+ memset(&pool, 0, sizeof(struct npa_pool_s));
+ pool.nat_align = 1;
+ pool.buf_offset = 1;
+
+ if ((uint32_t)pool.buf_offset * OTX2_ALIGN != mp->header_size) {
+ otx2_err("Unsupported mp->header_size=%d", mp->header_size);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /* Use driver specific mp->pool_config to override aura config */
+ if (mp->pool_config != NULL)
+ memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
+
+ rc = npa_lf_aura_pool_pair_alloc(lf, block_size, block_count,
+ &aura, &pool, &aura_handle);
+ if (rc) {
+ otx2_err("Failed to alloc pool or aura rc=%d", rc);
+ goto error;
+ }
+
+ /* Store aura_handle for future queue operations */
+ mp->pool_id = aura_handle;
+ otx2_npa_dbg("lf=%p block_sz=%d block_count=%d aura_handle=0x%"PRIx64,
+ lf, block_size, block_count, aura_handle);
+
+ /* Just hold the reference of the object */
+ otx2_npa_lf_obj_ref();
+ return 0;
+error:
+ return rc;
+}
+
+static struct rte_mempool_ops otx2_npa_ops = {
+ .name = "octeontx2_npa",
+ .alloc = otx2_npa_alloc,
+};
+
+MEMPOOL_REGISTER_OPS(otx2_npa_ops);
--
2.21.0