From: Hernan Vargas
To: stable@dpdk.org, ktraynor@redhat.com
Cc: nicolas.chautru@intel.com, Hernan Vargas, Maxime Coquelin
Subject: [PATCH 21.11 7/7] baseband/acc100: fix double MSI intr in TB mode
Date: Wed, 16 Nov 2022 20:46:52 -0800
Message-Id: <20221117044652.163000-8-hernan.vargas@intel.com>
X-Mailer: git-send-email 2.37.1
In-Reply-To: <20221117044652.163000-1-hernan.vargas@intel.com>
References: <20221117044652.163000-1-hernan.vargas@intel.com>

[ upstream commit beaf1f876c2c871f0197f9dd090eabed8f7e1e3d ]

Fix a logical bug in SW causing the MSI interrupt to be issued twice
when running in transport block mode.

Fixes: f404dfe35cc ("baseband/acc100: support 4G processing")
Fixes: bec597b78a0 ("baseband/acc200: add LTE processing")

Signed-off-by: Hernan Vargas
Reviewed-by: Maxime Coquelin
---
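The net effect of the change: the IRQ-enable bit is now set in a single
place, on the last descriptor of the whole enqueued burst inside
acc100_dma_enqueue() (together with SDone), and the duplicate assignments
in the per-TB helpers and CB-mode wrappers are dropped, so TB mode no
longer raises the MSI twice. The sketch below only illustrates that idea
with toy types (toy_desc, RING_WRAP_MASK and friends are made up for the
example, not the driver's real structures); only the flag names mirror
the diff.

/*
 * Toy illustration (not the acc100 driver's real types): flag
 * SDone/IRQ-enable once, on the last descriptor of the whole burst,
 * instead of once per transport block.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8			/* power of two, like the SW ring */
#define RING_WRAP_MASK (RING_SIZE - 1)

struct toy_desc {
	int sdone_enable;
	int irq_enable;
};

static struct toy_desc ring[RING_SIZE];

/* Old scheme (simplified): each TB flags its own last CB descriptor. */
static void enqueue_one_tb_old(uint16_t head, uint16_t num_cbs, int irq_on)
{
	struct toy_desc *d = &ring[(head + num_cbs - 1) & RING_WRAP_MASK];

	d->sdone_enable = 1;
	d->irq_enable = irq_on;		/* one MSI per TB -> several per burst */
}

/* New scheme (simplified): only the last descriptor of the burst is flagged. */
static void dma_enqueue_new(uint16_t head, uint16_t n, int irq_on)
{
	struct toy_desc *d = &ring[(head + n - 1) & RING_WRAP_MASK];

	d->sdone_enable = 1;
	d->irq_enable = irq_on;		/* a single MSI for the whole burst */
}

static int count_irq_descs(void)
{
	int n = 0;

	for (int i = 0; i < RING_SIZE; i++)
		n += ring[i].irq_enable;
	return n;
}

int main(void)
{
	/* A burst of two TBs, three CBs each, starting at ring head 0. */
	enqueue_one_tb_old(0, 3, 1);
	enqueue_one_tb_old(3, 3, 1);
	printf("old scheme: %d descriptor(s) raise an interrupt\n",
			count_irq_descs());	/* prints 2 */

	for (int i = 0; i < RING_SIZE; i++)
		ring[i] = (struct toy_desc){0, 0};

	dma_enqueue_new(0, 6, 1);
	printf("new scheme: %d descriptor(s) raise an interrupt\n",
			count_irq_descs());	/* prints 1 */
	return 0;
}

Built on its own, the sketch reports two flagged descriptors for the old
per-TB scheme and one for the batched scheme, which is the behavior the
hunks below implement in the PMD.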
 drivers/baseband/acc100/rte_acc100_pmd.c | 39 ++++--------------------
 1 file changed, 6 insertions(+), 33 deletions(-)

diff --git a/drivers/baseband/acc100/rte_acc100_pmd.c b/drivers/baseband/acc100/rte_acc100_pmd.c
index 3030265c16..eba6eb0df4 100644
--- a/drivers/baseband/acc100/rte_acc100_pmd.c
+++ b/drivers/baseband/acc100/rte_acc100_pmd.c
@@ -2073,6 +2073,7 @@ acc100_dma_enqueue(struct acc100_queue *q, uint16_t n,
 		struct rte_bbdev_stats *queue_stats)
 {
 	union acc100_enqueue_reg_fmt enq_req;
+	union acc100_dma_desc *desc;
 #ifdef RTE_BBDEV_OFFLOAD_COST
 	uint64_t start_time = 0;
 	queue_stats->acc_offload_cycles = 0;
@@ -2080,13 +2081,17 @@ acc100_dma_enqueue(struct acc100_queue *q, uint16_t n,
 	RTE_SET_USED(queue_stats);
 #endif
 
+	/* Set Sdone and IRQ enable bit on last descriptor. */
+	desc = q->ring_addr + ((q->sw_ring_head + n - 1) & q->sw_ring_wrap_mask);
+	desc->req.sdone_enable = 1;
+	desc->req.irq_enable = q->irq_enable;
+
 	enq_req.val = 0;
 	/* Setting offset, 100b for 256 DMA Desc */
 	enq_req.addr_offset = ACC100_DESC_OFFSET;
 
 	/* Split ops into batches */
 	do {
-		union acc100_dma_desc *desc;
 		uint16_t enq_batch_size;
 		uint64_t offset;
 		rte_iova_t req_elem_addr;
@@ -2638,7 +2643,6 @@ enqueue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
 
 	/* Set SDone on last CB descriptor for TB mode. */
 	desc->req.sdone_enable = 1;
-	desc->req.irq_enable = q->irq_enable;
 
 	return current_enqueued_cbs;
 }
@@ -3202,7 +3206,6 @@ enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
 #endif
 	/* Set SDone on last CB descriptor for TB mode */
 	desc->req.sdone_enable = 1;
-	desc->req.irq_enable = q->irq_enable;
 
 	return current_enqueued_cbs;
 }
@@ -3304,7 +3307,6 @@ enqueue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
 #endif
 	/* Set SDone on last CB descriptor for TB mode */
 	desc->req.sdone_enable = 1;
-	desc->req.irq_enable = q->irq_enable;
 
 	return current_enqueued_cbs;
 }
@@ -3407,7 +3409,6 @@ acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
 	struct acc100_queue *q = q_data->queue_private;
 	int32_t avail = acc100_ring_avail_enq(q);
 	uint16_t i;
-	union acc100_dma_desc *desc;
 	int ret;
 
 	for (i = 0; i < num; ++i) {
@@ -3424,12 +3425,6 @@ acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
 	if (unlikely(i == 0))
 		return 0; /* Nothing to enqueue */
 
-	/* Set SDone in last CB in enqueued ops for CB mode*/
-	desc = q->ring_addr + ((q->sw_ring_head + i - 1)
-			& q->sw_ring_wrap_mask);
-	desc->req.sdone_enable = 1;
-	desc->req.irq_enable = q->irq_enable;
-
 	acc100_dma_enqueue(q, i, &q_data->queue_stats);
 
 	/* Update stats */
@@ -3463,7 +3458,6 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
 	struct acc100_queue *q = q_data->queue_private;
 	int32_t avail = acc100_ring_avail_enq(q);
 	uint16_t i = 0;
-	union acc100_dma_desc *desc;
 	int ret, desc_idx = 0;
 	int16_t enq, left = num;
 
@@ -3491,12 +3485,6 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
 	if (unlikely(i == 0))
 		return 0; /* Nothing to enqueue */
 
-	/* Set SDone in last CB in enqueued ops for CB mode*/
-	desc = q->ring_addr + ((q->sw_ring_head + desc_idx - 1)
-			& q->sw_ring_wrap_mask);
-	desc->req.sdone_enable = 1;
-	desc->req.irq_enable = q->irq_enable;
-
 	acc100_dma_enqueue(q, desc_idx, &q_data->queue_stats);
 
 	/* Update stats */
@@ -3593,7 +3581,6 @@ acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,
 	struct acc100_queue *q = q_data->queue_private;
 	int32_t avail = acc100_ring_avail_enq(q);
 	uint16_t i;
-	union acc100_dma_desc *desc;
 	int ret;
 
 	for (i = 0; i < num; ++i) {
@@ -3610,12 +3597,6 @@ acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,
 	if (unlikely(i == 0))
 		return 0; /* Nothing to enqueue */
 
-	/* Set SDone in last CB in enqueued ops for CB mode*/
-	desc = q->ring_addr + ((q->sw_ring_head + i - 1)
-			& q->sw_ring_wrap_mask);
-	desc->req.sdone_enable = 1;
-	desc->req.irq_enable = q->irq_enable;
-
 	acc100_dma_enqueue(q, i, &q_data->queue_stats);
 
 	/* Update stats */
@@ -3681,7 +3662,6 @@ acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
 	struct acc100_queue *q = q_data->queue_private;
 	int32_t avail = acc100_ring_avail_enq(q);
 	uint16_t i;
-	union acc100_dma_desc *desc;
 	int ret;
 	bool same_op = false;
 	for (i = 0; i < num; ++i) {
@@ -3707,13 +3687,6 @@ acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
 	if (unlikely(i == 0))
 		return 0; /* Nothing to enqueue */
 
-	/* Set SDone in last CB in enqueued ops for CB mode*/
-	desc = q->ring_addr + ((q->sw_ring_head + i - 1)
-			& q->sw_ring_wrap_mask);
-
-	desc->req.sdone_enable = 1;
-	desc->req.irq_enable = q->irq_enable;
-
 	acc100_dma_enqueue(q, i, &q_data->queue_stats);
 
 	/* Update stats */
-- 
2.37.1