From: Gagandeep Singh <g.singh@nxp.com>
To: dev@dpdk.org, nipun.gupta@nxp.com, hemant.agrawal@nxp.com
Cc: thomas.monjalon@6wind.com, Jun Yang
Date: Mon, 7 Sep 2020 17:26:03 +0800
Message-Id: <1599470764-30569-7-git-send-email-g.singh@nxp.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1599470764-30569-1-git-send-email-g.singh@nxp.com>
References: <1599470764-30569-1-git-send-email-g.singh@nxp.com>
Subject: [dpdk-dev] [PATCH 6/7] raw/dpaa2_qdma: support FLE pool per queue

From: Jun Yang

Don't mix SG and non-SG jobs in a pool with a single FLE element
format; otherwise non-SG performance suffers. To support SG queues
and non-SG queues with different FLE pool element formats, associate
the FLE pool with the queue instead of the device.

Signed-off-by: Jun Yang
---
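Note (not for commit): a sketch of the two per-queue pool element
layouts this patch introduces, written out as structs for illustration.
The struct and field names below are hypothetical and padding is
ignored; the driver itself addresses these fields through the
QDMA_FLE_*_OFFSET macros over raw mempool memory (see dpaa2_qdma.h
further down). A non-SG queue element now carries only a single job
pointer, so it no longer pays for the SG entry table and job array:

	/* Illustrative only; real layout is defined by the offset macros.
	 * Single-job (non-SG) queue pool element: QDMA_FLE_SINGLE_POOL_SIZE.
	 */
	struct qdma_fle_single_elem {
		struct rte_qdma_job *job;	/* QDMA_FLE_SINGLE_JOB_OFFSET (0) */
		struct qbman_fle fle[3];	/* QDMA_FLE_FLE_OFFSET */
		struct qdma_sdd sdd[2];		/* QDMA_FLE_SDD_OFFSET */
	};

	/* SG queue pool element: QDMA_FLE_SG_POOL_SIZE. */
	struct qdma_fle_sg_elem {
		uint64_t job_nb;		/* QDMA_FLE_JOB_NB_OFFSET (0) */
		struct qbman_fle fle[3];
		struct qdma_sdd sdd[2];
		struct qdma_sg_entry sg[64 * 2];	/* 64 src + 64 dst (DPAA2_QDMA_MAX_SG_NB) */
		struct rte_qdma_job *jobs[64];		/* QDMA_FLE_SG_JOBS_OFFSET */
	};
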
 drivers/raw/dpaa2_qdma/dpaa2_qdma.c         | 111 +++++++++++++++++-----------
 drivers/raw/dpaa2_qdma/dpaa2_qdma.h         |  28 ++++---
 drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h |   2 +-
 3 files changed, 88 insertions(+), 53 deletions(-)

diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
index 0c56a04..ba46ed0 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -300,12 +300,11 @@ static inline int dpdmai_dev_set_multi_fd_lf(
 	struct rte_qdma_job **ppjob;
 	uint16_t i;
 	int ret;
-	struct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);
 	void *elem[RTE_QDMA_BURST_NB_MAX];
 	struct qbman_fle *fle;
 	uint64_t elem_iova, fle_iova;
 
-	ret = rte_mempool_get_bulk(qdma_dev->fle_pool, elem, nb_jobs);
+	ret = rte_mempool_get_bulk(qdma_vq->fle_pool, elem, nb_jobs);
 	if (ret) {
 		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
 		return ret;
@@ -318,11 +317,8 @@ static inline int dpdmai_dev_set_multi_fd_lf(
 		elem_iova = DPAA2_VADDR_TO_IOVA(elem[i]);
 #endif
 
-		*((uint16_t *)
-		((uint64_t)elem[i] + QDMA_FLE_JOB_NB_OFFSET)) = 1;
-
 		ppjob = (struct rte_qdma_job **)
-			((uint64_t)elem[i] + QDMA_FLE_JOBS_OFFSET);
+			((uint64_t)elem[i] + QDMA_FLE_SINGLE_JOB_OFFSET);
 		*ppjob = job[i];
 
 		job[i]->vq_id = qdma_vq->vq_id;
@@ -360,13 +356,12 @@ static inline int dpdmai_dev_set_sg_fd_lf(
 	int ret = 0, i;
 	struct qdma_sg_entry *src_sge, *dst_sge;
 	uint32_t len, fmt, flags;
-	struct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);
 
 	/*
 	 * Get an FLE/SDD from FLE pool.
 	 * Note: IO metadata is before the FLE and SDD memory.
 	 */
-	ret = rte_mempool_get(qdma_dev->fle_pool, (void **)(&elem));
+	ret = rte_mempool_get(qdma_vq->fle_pool, (void **)(&elem));
 	if (ret) {
 		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
 		return ret;
@@ -382,7 +377,7 @@ static inline int dpdmai_dev_set_sg_fd_lf(
 	/* Save job context. */
 	*((uint16_t *)((uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs;
 	ppjob = (struct rte_qdma_job **)
-		((uint64_t)elem + QDMA_FLE_JOBS_OFFSET);
+		((uint64_t)elem + QDMA_FLE_SG_JOBS_OFFSET);
 	for (i = 0; i < nb_jobs; i++)
 		ppjob[i] = job[i];
 
@@ -450,7 +445,40 @@ static inline uint16_t dpdmai_dev_get_job_us(
 	return vqid;
 }
 
-static inline uint16_t dpdmai_dev_get_job_lf(
+static inline uint16_t dpdmai_dev_get_single_job_lf(
+	struct qdma_virt_queue *qdma_vq,
+	const struct qbman_fd *fd,
+	struct rte_qdma_job **job,
+	uint16_t *nb_jobs)
+{
+	struct qbman_fle *fle;
+	struct rte_qdma_job **ppjob = NULL;
+	uint16_t status;
+
+	/*
+	 * Fetch metadata from FLE. job and vq_id were set
+	 * in metadata in the enqueue operation.
+	 */
+	fle = (struct qbman_fle *)
+			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+	*nb_jobs = 1;
+	ppjob = (struct rte_qdma_job **)((uint64_t)fle -
+			QDMA_FLE_FLE_OFFSET + QDMA_FLE_SINGLE_JOB_OFFSET);
+
+	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
+
+	*job = *ppjob;
+	(*job)->status = status;
+
+	/* Free FLE to the pool */
+	rte_mempool_put(qdma_vq->fle_pool,
+		(void *)((uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+
+	return (*job)->vq_id;
+}
+
+static inline uint16_t dpdmai_dev_get_sg_job_lf(
 	struct qdma_virt_queue *qdma_vq,
 	const struct qbman_fd *fd,
 	struct rte_qdma_job **job,
@@ -459,7 +487,6 @@ static inline uint16_t dpdmai_dev_get_job_lf(
 	struct qbman_fle *fle;
 	struct rte_qdma_job **ppjob = NULL;
 	uint16_t i, status;
-	struct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);
 
 	/*
 	 * Fetch metadata from FLE. job and vq_id were set
@@ -470,10 +497,9 @@ static inline uint16_t dpdmai_dev_get_job_lf(
 
 	*nb_jobs = *((uint16_t *)((uint64_t)fle -
 				QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET));
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
-
 	ppjob = (struct rte_qdma_job **)((uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOBS_OFFSET);
+				QDMA_FLE_FLE_OFFSET + QDMA_FLE_SG_JOBS_OFFSET);
+	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
 
 	for (i = 0; i < (*nb_jobs); i++) {
 		job[i] = ppjob[i];
@@ -481,7 +507,7 @@ static inline uint16_t dpdmai_dev_get_job_lf(
 	}
 
 	/* Free FLE to the pool */
-	rte_mempool_put(qdma_dev->fle_pool,
+	rte_mempool_put(qdma_vq->fle_pool,
 		(void *)((uint64_t)fle - QDMA_FLE_FLE_OFFSET));
 
 	return job[0]->vq_id;
@@ -1044,14 +1070,9 @@ dpaa2_qdma_reset(struct rte_rawdev *rawdev)
 	memset(&qdma_core_info, 0,
 		sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
 
-	/* Free the FLE pool */
-	if (qdma_dev->fle_pool)
-		rte_mempool_free(qdma_dev->fle_pool);
-
 	/* Reset QDMA device structure */
 	qdma_dev->max_hw_queues_per_core = 0;
-	qdma_dev->fle_pool = NULL;
-	qdma_dev->fle_pool_count = 0;
+	qdma_dev->fle_queue_pool_cnt = 0;
 	qdma_dev->max_vqs = 0;
 
 	return 0;
@@ -1094,23 +1115,7 @@ dpaa2_qdma_configure(const struct rte_rawdev *rawdev,
 		return -ENOMEM;
 	}
 	qdma_dev->max_vqs = qdma_config->max_vqs;
-
-	/* Allocate FLE pool; just append PID so that in case of
-	 * multiprocess, the pool's don't collide.
-	 */
-	snprintf(name, sizeof(name), "qdma_fle_pool%u",
-		getpid());
-	qdma_dev->fle_pool = rte_mempool_create(name,
-			qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
-			QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
-	if (!qdma_dev->fle_pool) {
-		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
-		rte_free(qdma_dev->vqs);
-		qdma_dev->vqs = NULL;
-		return -ENOMEM;
-	}
-	qdma_dev->fle_pool_count = qdma_config->fle_pool_count;
+	qdma_dev->fle_queue_pool_cnt = qdma_config->fle_queue_pool_cnt;
 
 	return 0;
 }
@@ -1171,11 +1176,13 @@ dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,
 			  rte_rawdev_obj_t queue_conf)
 {
 	char ring_name[32];
+	char pool_name[64];
 	int i;
 	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	struct rte_qdma_queue_config *q_config =
 		(struct rte_qdma_queue_config *)queue_conf;
+	uint32_t pool_size;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1207,6 +1214,9 @@ dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,
 			rte_spinlock_unlock(&qdma_dev->lock);
 			return -ENODEV;
 		}
+		pool_size = QDMA_FLE_SG_POOL_SIZE;
+	} else {
+		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
 	}
 
 	if (q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ) {
@@ -1217,7 +1227,7 @@ dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,
 		/* Allocate a Ring for Virtual Queue in VQ mode */
 		snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
 		qdma_dev->vqs[i].status_ring = rte_ring_create(ring_name,
-			qdma_dev->fle_pool_count, rte_socket_id(), 0);
+			qdma_dev->fle_queue_pool_cnt, rte_socket_id(), 0);
 		if (!qdma_dev->vqs[i].status_ring) {
 			DPAA2_QDMA_ERR("Status ring creation failed for vq");
 			rte_spinlock_unlock(&qdma_dev->lock);
@@ -1239,17 +1249,31 @@ dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,
 		return -ENODEV;
 	}
 
+	snprintf(pool_name, sizeof(pool_name),
+		"qdma_fle_pool%u_queue%d", getpid(), i);
+	qdma_dev->vqs[i].fle_pool = rte_mempool_create(pool_name,
+			qdma_dev->fle_queue_pool_cnt, pool_size,
+			QDMA_FLE_CACHE_SIZE(qdma_dev->fle_queue_pool_cnt), 0,
+			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+	if (!qdma_dev->vqs[i].fle_pool) {
+		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
+		rte_spinlock_unlock(&qdma_dev->lock);
+		return -ENOMEM;
+	}
+
 	qdma_dev->vqs[i].flags = q_config->flags;
 	qdma_dev->vqs[i].in_use = 1;
 	qdma_dev->vqs[i].lcore_id = q_config->lcore_id;
 	memset(&qdma_dev->vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
 
 	if (q_config->flags & RTE_QDMA_VQ_FD_LONG_FORMAT) {
-		if (q_config->flags & RTE_QDMA_VQ_FD_SG_FORMAT)
+		if (q_config->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {
 			qdma_dev->vqs[i].set_fd = dpdmai_dev_set_sg_fd_lf;
-		else
+			qdma_dev->vqs[i].get_job = dpdmai_dev_get_sg_job_lf;
+		} else {
 			qdma_dev->vqs[i].set_fd = dpdmai_dev_set_multi_fd_lf;
-		qdma_dev->vqs[i].get_job = dpdmai_dev_get_job_lf;
+			qdma_dev->vqs[i].get_job = dpdmai_dev_get_single_job_lf;
+		}
 	} else {
 		qdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_us;
 		qdma_dev->vqs[i].get_job = dpdmai_dev_get_job_us;
@@ -1435,6 +1459,9 @@ dpaa2_qdma_queue_release(struct rte_rawdev *rawdev,
 		put_hw_queue(qdma_vq->hw_queue);
 	}
 
+	if (qdma_vq->fle_pool)
+		rte_mempool_free(qdma_vq->fle_pool);
+
 	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
 
 	rte_spinlock_unlock(&qdma_dev->lock);
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
index 43a01d5..0892a19 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
@@ -15,19 +15,27 @@ struct rte_qdma_job;
 
 #define DPAA2_DPDMAI_MAX_QUEUES	8
 
-/** FLE pool size: job number(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor +
- * 32 (src + dst) sg entries + 32 jobs pointers.
+/** FLE single job pool size: job pointer (uint64_t) +
+ * 3 frame list entries + 2 source/destination descriptors.
  */
+#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
+		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
+		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
 
-#define QDMA_FLE_POOL_SIZE (sizeof(uint64_t) + \
+/** FLE SG jobs pool size: job number (uint64_t) +
+ * 3 frame list entries + 2 source/destination descriptors +
+ * 64 (src + dst) sg entries + 64 job pointers.
+ */
+#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
 		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
 		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
-		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2 + \
+		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
 		sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)
 
 #define QDMA_FLE_JOB_NB_OFFSET	0
+#define QDMA_FLE_SINGLE_JOB_OFFSET	0
 
 #define QDMA_FLE_FLE_OFFSET \
 		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))
@@ -39,7 +47,7 @@ struct rte_qdma_job;
 		(QDMA_FLE_SDD_OFFSET + \
 		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
 
-#define QDMA_FLE_JOBS_OFFSET \
+#define QDMA_FLE_SG_JOBS_OFFSET \
 		(QDMA_FLE_SG_ENTRY_OFFSET + \
 		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)
 
@@ -85,10 +93,8 @@ struct qdma_device {
 	uint16_t max_vqs;
 	/** Device state - started or stopped */
 	uint8_t state;
-	/** FLE pool for the device */
-	struct rte_mempool *fle_pool;
-	/** FLE pool size */
-	int fle_pool_count;
+	/** FLE queue pool size */
+	int fle_queue_pool_cnt;
 	/** A lock to QDMA device whenever required */
 	rte_spinlock_t lock;
 };
@@ -135,6 +141,8 @@ struct qdma_virt_queue {
 	struct rte_ring *status_ring;
 	/** Associated hw queue */
 	struct qdma_hw_queue *hw_queue;
+	/** FLE pool for the queue */
+	struct rte_mempool *fle_pool;
 	/** Route by port */
 	struct rte_qdma_rbp rbp;
 	/** Associated lcore id */
diff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
index cfec303..3cd4167 100644
--- a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
@@ -80,7 +80,7 @@ struct rte_qdma_config {
 	 * maximum number of inflight jobs on the QDMA device. This should
 	 * be power of 2.
 	 */
-	int fle_pool_count;
+	int fle_queue_pool_cnt;
 };
 
 struct rte_qdma_rbp {
-- 
2.7.4