DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Hernan Vargas <hernan.vargas@intel.com>,
	dev@dpdk.org, gakhil@marvell.com, trix@redhat.com
Cc: nicolas.chautru@intel.com, qi.z.zhang@intel.com
Subject: Re: [PATCH v3 6/7] baseband/acc: acc100 use desc helper functions
Date: Tue, 17 Jan 2023 11:50:15 +0100	[thread overview]
Message-ID: <81accdc2-254e-d809-f6b6-bb34dc848cd7@redhat.com> (raw)
In-Reply-To: <20230112193609.273578-7-hernan.vargas@intel.com>



On 1/12/23 20:36, Hernan Vargas wrote:
> Use the designated descriptor helper functions for descriptor address
> calculations.
> No functional impact.
> 
> Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
> ---
>   drivers/baseband/acc/rte_acc100_pmd.c | 35 +++++++++------------------
>   1 file changed, 12 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c
> index ef1c488a1c..d3aaeb8f2c 100644
> --- a/drivers/baseband/acc/rte_acc100_pmd.c
> +++ b/drivers/baseband/acc/rte_acc100_pmd.c
> @@ -2422,9 +2422,8 @@ enqueue_ldpc_enc_part_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,
>   	struct rte_mbuf *output_head, *output;
>   	int i, next_triplet;
>   	struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
> -	uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_descs) & q->sw_ring_wrap_mask);
>   
> -	desc = q->ring_addr + desc_idx;
> +	desc = acc_desc(q, total_enqueued_descs);
>   	acc_fcw_le_fill(op, &desc->req.fcw_le, num_cbs, e);
>   
>   	/* This could be done at polling. */
> @@ -2613,7 +2612,6 @@ enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,
>   	}
>   #endif
>   	uint8_t num_a, num_b;
> -	uint16_t desc_idx;
>   	uint8_t r = op->ldpc_enc.tb_params.r;
>   	uint8_t cab =  op->ldpc_enc.tb_params.cab;
>   	union acc_dma_desc *desc;
> @@ -2655,16 +2653,15 @@ enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,
>   
>   	return_descs = enq_descs - init_enq_descs;
>   	/* Keep total number of CBs in first TB. */
> -	desc_idx = ((q->sw_ring_head + init_enq_descs) & q->sw_ring_wrap_mask);
> -	desc = q->ring_addr + desc_idx;
> +	desc = acc_desc(q, init_enq_descs);
>   	desc->req.cbs_in_tb = return_descs; /** Actual number of descriptors. */
>   	desc->req.op_addr = op;
>   
>   	/* Set SDone on last CB descriptor for TB mode. */
> -	desc_idx = ((q->sw_ring_head + enq_descs - 1) & q->sw_ring_wrap_mask);
> -	desc = q->ring_addr + desc_idx;
> +	desc = acc_desc(q, enq_descs - 1);
>   	desc->req.sdone_enable = 1;
>   	desc->req.op_addr = op;
> +
>   	return return_descs;
>   }
>   
> @@ -3275,7 +3272,7 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
>   		h_out_length, mbuf_total_left, seg_total_left;
>   	struct rte_mbuf *input, *h_output_head, *h_output,
>   		*s_output_head, *s_output;
> -	uint16_t desc_idx, current_enqueued_cbs = 0;
> +	uint16_t current_enqueued_cbs = 0;
>   	uint64_t fcw_offset;
>   
>   #ifndef RTE_LIBRTE_BBDEV_SKIP_VALIDATE
> @@ -3290,9 +3287,8 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
>   	}
>   #endif
>   
> -	desc_idx = acc_desc_idx(q, total_enqueued_cbs);
> -	desc = q->ring_addr + desc_idx;
> -	fcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET;
> +	desc = acc_desc(q, total_enqueued_cbs);
> +	fcw_offset = (acc_desc_idx(q, total_enqueued_cbs) << 8) + ACC_DESC_FCW_OFFSET;
>   	acc100_fcw_td_fill(op, &desc->req.fcw_td);
>   
>   	input = op->turbo_dec.input.data;
> @@ -3777,7 +3773,6 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,
>   
>   	/* Clearing status, it will be set based on response */
>   	op->status = 0;
> -
>   	op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
>   	op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
>   
> @@ -4023,8 +4018,8 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op,
>   		atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
>   				__ATOMIC_RELAXED);
>   		rsp.val = atom_desc.rsp.val;
> -		rte_bbdev_log_debug("Resp. desc %p: %x", desc,
> -				rsp.val);
> +		rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
> +						desc, rsp.val, cb_idx, cbs_in_tb);
>   
>   		op->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
>   		op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
> @@ -4077,8 +4072,7 @@ acc100_dequeue_enc(struct rte_bbdev_queue_data *q_data,
>   		return 0;
>   	}
>   #endif
> -	op = (q->ring_addr + (q->sw_ring_tail &
> -			q->sw_ring_wrap_mask))->req.op_addr;
> +	op = acc_op_tail(q, 0);
>   	if (unlikely(ops == NULL || op == NULL))
>   		return 0;
>   	cbm = op->turbo_enc.code_block_mode;
> @@ -4120,7 +4114,6 @@ acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
>   	uint16_t i, dequeued_ops = 0, dequeued_descs = 0;
>   	int ret, cbm;
>   	struct rte_bbdev_enc_op *op;
> -	union acc_dma_desc *desc;
>   
>   	if (q == NULL)
>   		return 0;
> @@ -4128,10 +4121,7 @@ acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
>   	if (unlikely(ops == 0))
>   		return 0;
>   #endif
> -	desc = q->ring_addr + (q->sw_ring_tail & q->sw_ring_wrap_mask);
> -	if (unlikely(desc == NULL))
> -		return 0;
> -	op = desc->req.op_addr;
> +	op = acc_op_tail(q, 0);
>   	if (unlikely(ops == NULL || op == NULL))
>   		return 0;
>   	cbm = op->ldpc_enc.code_block_mode;
> @@ -4227,8 +4217,7 @@ acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
>   	dequeue_num = RTE_MIN(avail, num);
>   
>   	for (i = 0; i < dequeue_num; ++i) {
> -		op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
> -			& q->sw_ring_wrap_mask))->req.op_addr;
> +		op = acc_op_tail(q, dequeued_cbs);
>   		if (unlikely(op == NULL))
>   			break;
>   		if (op->ldpc_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
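
For reference, the helpers the call sites now use behave roughly as below.
This is a sketch inferred from the open-coded expressions the patch removes,
not the actual definitions in acc_common.h; the exact signatures (in
particular the return type of acc_op_tail) are assumptions.

	/* Ring index of the Nth descriptor after the software ring head. */
	static inline uint16_t
	acc_desc_idx(struct acc_queue *q, uint16_t offset)
	{
		return (q->sw_ring_head + offset) & q->sw_ring_wrap_mask;
	}

	/* Descriptor pointer for that same enqueue-side index. */
	static inline union acc_dma_desc *
	acc_desc(struct acc_queue *q, uint16_t offset)
	{
		return q->ring_addr + acc_desc_idx(q, offset);
	}

	/* Operation stored in the Nth descriptor after the software ring
	 * tail, as used on the dequeue path. */
	static inline void *
	acc_op_tail(struct acc_queue *q, uint16_t offset)
	{
		return (q->ring_addr +
			((q->sw_ring_tail + offset) & q->sw_ring_wrap_mask))->req.op_addr;
	}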

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime

