From: Hernan Vargas <hernan.vargas@intel.com>
To: dev@dpdk.org, gakhil@marvell.com, trix@redhat.com,
maxime.coquelin@redhat.com
Cc: nicolas.chautru@intel.com, qi.z.zhang@intel.com,
Hernan Vargas <hernan.vargas@intel.com>
Subject: [PATCH v1 10/10] baseband/acc: cosmetic changes
Date: Fri, 30 Aug 2024 13:09:52 -0700
Message-ID: <20240830200952.182685-11-hernan.vargas@intel.com>
In-Reply-To: <20240830200952.182685-1-hernan.vargas@intel.com>

Cosmetic code changes: refactor the FCW length selection in vrb_queue_setup()
from a chain of nested ternary operators into a switch statement, explicitly
report zero MLDTS queues in the ACC100 device info, and tidy up log messages,
comments and whitespace.

No functional impact.

Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
---
drivers/baseband/acc/rte_acc100_pmd.c | 3 +-
drivers/baseband/acc/rte_vrb_pmd.c | 72 ++++++++++++++++++---------
2 files changed, 50 insertions(+), 25 deletions(-)
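
Note for reviewers, not part of the patch itself: the sketch below illustrates
the ternary-chain versus switch-statement pattern this patch applies when
selecting the FCW length in vrb_queue_setup(). Only the function and op-type
naming mirror the driver; the OP_* enum and the *_BLEN placeholder values are
made up for illustration and do not correspond to the real ACC_FCW_*_BLEN
constants.

#include <stdio.h>

/* Hypothetical stand-ins for the bbdev operation types handled by the PMD. */
enum op_type {
	OP_LDPC_ENC,
	OP_LDPC_DEC,
	OP_TURBO_DEC,
	OP_TURBO_ENC,
	OP_FFT,
	OP_MLDTS
};

/* Placeholder lengths; the driver uses the ACC_FCW_*_BLEN macros instead. */
#define LE_BLEN    32
#define LD_BLEN    36
#define TD_BLEN    24
#define TE_BLEN    20
#define FFT_BLEN   28
#define MLDTS_BLEN 16

/* Before: one nested ternary chain. Hard to scan, and it has no explicit
 * TURBO_ENC branch, so that op type silently falls through to the last value.
 */
static int
fcw_len_ternary(enum op_type t)
{
	return t == OP_LDPC_ENC ? LE_BLEN :
		(t == OP_TURBO_DEC ? TD_BLEN :
		(t == OP_LDPC_DEC ? LD_BLEN :
		(t == OP_FFT ? FFT_BLEN : MLDTS_BLEN)));
}

/* After: a switch gives each op type its own case, leaves room for
 * per-variant tweaks (e.g. the VRB2 FFT length), and keeps an error default.
 */
static int
fcw_len_switch(enum op_type t)
{
	switch (t) {
	case OP_LDPC_ENC:
		return LE_BLEN;
	case OP_LDPC_DEC:
		return LD_BLEN;
	case OP_TURBO_DEC:
		return TD_BLEN;
	case OP_TURBO_ENC:
		return TE_BLEN;
	case OP_FFT:
		return FFT_BLEN;
	case OP_MLDTS:
		return MLDTS_BLEN;
	default:
		return 0; /* Unexpected op type. */
	}
}

int
main(void)
{
	printf("ternary: %d, switch: %d\n",
		fcw_len_ternary(OP_FFT), fcw_len_switch(OP_FFT));
	return 0;
}
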
diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c
index 3501af2710fd..c33e2758b100 100644
--- a/drivers/baseband/acc/rte_acc100_pmd.c
+++ b/drivers/baseband/acc/rte_acc100_pmd.c
@@ -995,6 +995,7 @@ acc100_dev_info_get(struct rte_bbdev *dev,
dev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = d->acc_conf.q_dl_5g.num_aqs_per_groups *
d->acc_conf.q_dl_5g.num_qgroups;
dev_info->num_queues[RTE_BBDEV_OP_FFT] = 0;
+ dev_info->num_queues[RTE_BBDEV_OP_MLDTS] = 0;
dev_info->queue_priority[RTE_BBDEV_OP_TURBO_DEC] = d->acc_conf.q_ul_4g.num_qgroups;
dev_info->queue_priority[RTE_BBDEV_OP_TURBO_ENC] = d->acc_conf.q_dl_4g.num_qgroups;
dev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = d->acc_conf.q_ul_5g.num_qgroups;
@@ -4198,7 +4199,7 @@ poweron_cleanup(struct rte_bbdev *bbdev, struct acc_device *d,
acc_reg_write(d, HWPfQmgrIngressAq + 0x100, enq_req.val);
usleep(ACC_LONG_WAIT * 100);
if (desc->req.word0 != 2)
- rte_bbdev_log(WARNING, "DMA Response %#"PRIx32, desc->req.word0);
+ rte_bbdev_log(WARNING, "DMA Response %#"PRIx32"", desc->req.word0);
}
/* Reset LDPC Cores */
diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
index 6353a27e69b2..aaad8570bc7e 100644
--- a/drivers/baseband/acc/rte_vrb_pmd.c
+++ b/drivers/baseband/acc/rte_vrb_pmd.c
@@ -956,6 +956,9 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
struct acc_queue *q;
int32_t q_idx;
int ret;
+ union acc_dma_desc *desc = NULL;
+ unsigned int desc_idx, b_idx;
+ int fcw_len;
if (d == NULL) {
rte_bbdev_log(ERR, "Undefined device");
@@ -982,16 +985,33 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
}
/* Prepare the Ring with default descriptor format. */
- union acc_dma_desc *desc = NULL;
- unsigned int desc_idx, b_idx;
- int fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ?
- ACC_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?
- ACC_FCW_TD_BLEN : (conf->op_type == RTE_BBDEV_OP_LDPC_DEC ?
- ACC_FCW_LD_BLEN : (conf->op_type == RTE_BBDEV_OP_FFT ?
- ACC_FCW_FFT_BLEN : ACC_FCW_MLDTS_BLEN))));
-
- if ((q->d->device_variant == VRB2_VARIANT) && (conf->op_type == RTE_BBDEV_OP_FFT))
- fcw_len = ACC_FCW_FFT_BLEN_3;
+ switch (conf->op_type) {
+ case RTE_BBDEV_OP_LDPC_ENC:
+ fcw_len = ACC_FCW_LE_BLEN;
+ break;
+ case RTE_BBDEV_OP_LDPC_DEC:
+ fcw_len = ACC_FCW_LD_BLEN;
+ break;
+ case RTE_BBDEV_OP_TURBO_DEC:
+ fcw_len = ACC_FCW_TD_BLEN;
+ break;
+ case RTE_BBDEV_OP_TURBO_ENC:
+ fcw_len = ACC_FCW_TE_BLEN;
+ break;
+ case RTE_BBDEV_OP_FFT:
+ fcw_len = ACC_FCW_FFT_BLEN;
+ if (q->d->device_variant == VRB2_VARIANT)
+ fcw_len = ACC_FCW_FFT_BLEN_3;
+ break;
+ case RTE_BBDEV_OP_MLDTS:
+ fcw_len = ACC_FCW_MLDTS_BLEN;
+ break;
+ default:
+ /* NOT REACHED. */
+ fcw_len = 0;
+ rte_bbdev_log(ERR, "Unexpected error in %s using type %d", __func__, conf->op_type);
+ break;
+ }
for (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {
desc = q->ring_addr + desc_idx;
@@ -1490,7 +1510,6 @@ vrb_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)
return 0;
}
-
static int
vrb_queue_ops_dump(struct rte_bbdev *dev, uint16_t queue_id, FILE *f)
{
@@ -1773,8 +1792,7 @@ vrb_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw,
if (fcw->hcout_en > 0) {
parity_offset = (op->ldpc_dec.basegraph == 1 ? 20 : 8)
* op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
- k0_p = (fcw->k0 > parity_offset) ?
- fcw->k0 - op->ldpc_dec.n_filler : fcw->k0;
+ k0_p = (fcw->k0 > parity_offset) ? fcw->k0 - op->ldpc_dec.n_filler : fcw->k0;
ncb_p = fcw->ncb - op->ldpc_dec.n_filler;
l = k0_p + fcw->rm_e;
harq_out_length = (uint16_t) fcw->hcin_size0;
@@ -1874,7 +1892,7 @@ vrb_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
*h_out_length, next_triplet, ACC_DMA_BLKID_OUT_HARD);
if (unlikely(next_triplet < 0)) {
acc_error_log(q, (void *)op,
- "Mismatch between data to process and mbuf data length in bbdev_op: %p",
+ "Mismatch between data to process and mbuf data length in bbdev_op: %p\n",
op);
return -1;
}
@@ -2017,16 +2035,15 @@ vrb_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
next_triplet++;
}
- if (check_bit(op->ldpc_dec.op_flags,
- RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
+ if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
if (op->ldpc_dec.harq_combined_output.data == 0) {
acc_error_log(q, (void *)op, "HARQ output is not defined\n");
return -1;
}
- /* Pruned size of the HARQ */
+ /* Pruned size of the HARQ. */
h_p_size = fcw->hcout_size0 + fcw->hcout_size1;
- /* Non-Pruned size of the HARQ */
+ /* Non-Pruned size of the HARQ. */
h_np_size = fcw->hcout_offset > 0 ?
fcw->hcout_offset + fcw->hcout_size1 :
h_p_size;
@@ -2600,7 +2617,6 @@ vrb_enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
else
seg_total_left = fcw->rm_e;
-
ret = vrb_dma_desc_ld_fill(op, &desc->req, &input, h_output,
&in_offset, &h_out_offset,
&h_out_length, &mbuf_total_left,
@@ -2619,9 +2635,10 @@ vrb_enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
hq_output = op->ldpc_dec.harq_combined_output.data;
hq_len = op->ldpc_dec.harq_combined_output.length;
if (unlikely(!mbuf_append(hq_output_head, hq_output, hq_len))) {
- rte_bbdev_log(ERR, "HARQ output mbuf issue %d %d\n",
- hq_output->buf_len,
- hq_len);
+ acc_error_log(q, (void *)op,
+ "HARQ output mbuf cannot be appended Buffer %d Current data %d New data %d\n",
+ hq_output->buf_len, hq_output->data_len, hq_len);
+
return -1;
}
}
@@ -2662,7 +2679,6 @@ vrb_enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
desc_first = desc;
fcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET;
harq_layout = q->d->harq_layout;
-
vrb_fcw_ld_fill(op, &desc->req.fcw_ld, harq_layout, q->d->device_variant);
input = op->ldpc_dec.input.data;
@@ -2866,6 +2882,7 @@ vrb_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
acc_dma_enqueue(q, i, &q_data->queue_stats);
acc_update_qstat_enqueue(q_data, i, num - i);
+
return i;
}
@@ -2973,6 +2990,7 @@ vrb_enqueue_ldpc_enc_tb(struct rte_bbdev_queue_data *q_data,
}
descs_used = vrb2_enqueue_ldpc_enc_one_op_tb(q, ops[i], enqueued_descs);
}
+
if (descs_used < 0) {
acc_enqueue_invalid(q_data);
break;
@@ -3079,6 +3097,7 @@ vrb_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,
acc_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
acc_update_qstat_enqueue(q_data, i, num - i);
+
return i;
}
@@ -3120,6 +3139,7 @@ vrb_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
acc_dma_enqueue(q, i, &q_data->queue_stats);
acc_update_qstat_enqueue(q_data, i, num - i);
+
return i;
}
@@ -3290,7 +3310,7 @@ vrb2_dequeue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **r
return 1;
}
-/* Dequeue one LDPC encode operations from device in TB mode.
+/* Dequeue one encode operation from device in TB mode.
* That operation may cover multiple descriptors.
*/
static inline int
@@ -3890,6 +3910,7 @@ vrb_enqueue_fft(struct rte_bbdev_queue_data *q_data,
acc_dma_enqueue(q, i, &q_data->queue_stats);
acc_update_qstat_enqueue(q_data, i, num - i);
+
return i;
}
@@ -3956,7 +3977,9 @@ vrb_dequeue_fft(struct rte_bbdev_queue_data *q_data,
q->aq_dequeued += aq_dequeued;
q->sw_ring_tail += dequeued_cbs;
+
acc_update_qstat_dequeue(q_data, i);
+
return i;
}
@@ -4211,6 +4234,7 @@ vrb2_enqueue_mldts(struct rte_bbdev_queue_data *q_data,
acc_dma_enqueue(q, enqueued_descs, &q_data->queue_stats);
acc_update_qstat_enqueue(q_data, i, num - i);
+
return i;
}
--
2.37.1