From: Nicolas Chautru <nicolas.chautru@intel.com>
To: dev@dpdk.org, maxime.coquelin@redhat.com
Cc: hemant.agrawal@nxp.com, david.marchand@redhat.com, hernan.vargas@intel.com,
 Nicolas Chautru <nicolas.chautru@intel.com>
Subject: [PATCH v1 2/2] baseband/acc: improvement to logging mechanism
Date: Mon, 1 Jul 2024 15:34:24 -0700
Message-Id: <20240701223424.427588-3-nicolas.chautru@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20240701223424.427588-1-nicolas.chautru@intel.com>
References: <20240701223424.427588-1-nicolas.chautru@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Support the new dev op to dump information on the operations of a given
queue, as well as information on previous errors detected by the driver
and tracked internally in the PMD.
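For reference, here is a minimal sketch of how an application could
trigger this dump through the generic bbdev API. It assumes the
rte_bbdev_queue_ops_dump() wrapper that invokes this dev op; the device
ID, queue ID, helper name and output path below are illustrative only:

    #include <errno.h>
    #include <stdio.h>
    #include <rte_bbdev.h>

    /* Illustrative helper: dump the state of one bbdev queue to a file. */
    static int
    dump_queue_state(uint16_t dev_id, uint16_t queue_id)
    {
            FILE *f = fopen("/tmp/bbdev_queue_dump.log", "w");
            int ret;

            if (f == NULL)
                    return -errno;

            /* Invokes the PMD queue_ops_dump dev op (vrb_queue_ops_dump here). */
            ret = rte_bbdev_queue_ops_dump(dev_id, queue_id, f);

            fclose(f);
            return ret;
    }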
Signed-off-by: Nicolas Chautru <nicolas.chautru@intel.com>
---
 drivers/baseband/acc/acc_common.h  |  37 +++++++++
 drivers/baseband/acc/rte_vrb_pmd.c | 128 +++++++++++++++++++++++------
 2 files changed, 138 insertions(+), 27 deletions(-)

diff --git a/drivers/baseband/acc/acc_common.h b/drivers/baseband/acc/acc_common.h
index e249f37e38..c8914b23e0 100644
--- a/drivers/baseband/acc/acc_common.h
+++ b/drivers/baseband/acc/acc_common.h
@@ -149,6 +149,8 @@
 #define VRB2_VF_ID_SHIFT        6
 
 #define ACC_MAX_FFT_WIN        16
+#define ACC_MAX_LOGLEN        256
+#define ACC_MAX_BUFFERLEN     256
 
 extern int acc_common_logtype;
 
@@ -646,8 +648,43 @@ struct __rte_cache_aligned acc_queue {
 	rte_iova_t fcw_ring_addr_iova;
 	int8_t *derm_buffer; /* interim buffer for de-rm in SDK */
 	struct acc_device *d;
+	char error_bufs[ACC_MAX_BUFFERLEN][ACC_MAX_LOGLEN]; /**< Buffer for error log. */
+	uint16_t error_head; /**< Head - Buffer for error log. */
+	uint16_t error_wrap; /**< Wrap Counter - Buffer for error log. */
 };
 
+/**
+ * @brief Report an error both through RTE logging and into internal driver memory.
+ *
+ * This function is used to log an error for a specific ACC queue and operation.
+ *
+ * @param q Pointer to the ACC queue.
+ * @param op Pointer to the operation.
+ * @param fmt Format string for the error message.
+ * @param ... Additional arguments for the format string.
+ */
+__rte_format_printf(3, 4)
+static inline void
+acc_error_log(struct acc_queue *q, void *op, const char *fmt, ...)
+{
+	va_list args, args2;
+
+	va_start(args, fmt);
+	va_copy(args2, args);
+	rte_vlog(RTE_LOG_ERR, acc_common_logtype, fmt, args);
+	vsnprintf(q->error_bufs[q->error_head], ACC_MAX_LOGLEN, fmt, args2);
+	q->error_head++;
+	snprintf(q->error_bufs[q->error_head], ACC_MAX_LOGLEN,
+			"%s", rte_bbdev_ops_param_string(op, q->op_type));
+	q->error_head++;
+	if (q->error_head == ACC_MAX_BUFFERLEN) {
+		q->error_head = 0;
+		q->error_wrap++;
+	}
+	va_end(args);
+	va_end(args2);
+}
+
 /* Write to MMIO register address */
 static inline void
 mmio_write(void *addr, uint32_t value)
diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
index 585dc49bd6..9d0145d09b 100644
--- a/drivers/baseband/acc/rte_vrb_pmd.c
+++ b/drivers/baseband/acc/rte_vrb_pmd.c
@@ -1022,6 +1022,10 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
 	q->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base,
 			d->queue_offset(d->pf_device, q->vf_id, q->qgrp_id, q->aq_id));
 
+	/* Initialize the error buffer. */
+	q->error_head = 0;
+	q->error_wrap = 0;
+
 	rte_bbdev_log_debug(
 			"Setup dev%u q%u: qgrp_id=%u, vf_id=%u, aq_id=%u, aq_depth=%u, mmio_reg_enqueue=%p base %p\n",
 			dev->data->dev_id, queue_id, q->qgrp_id, q->vf_id,
@@ -1434,6 +1438,74 @@ vrb_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)
 	return 0;
 }
 
+
+static int
+vrb_queue_ops_dump(struct rte_bbdev *dev, uint16_t queue_id, FILE *f)
+{
+	struct acc_queue *q = dev->data->queues[queue_id].queue_private;
+	struct rte_bbdev_dec_op *op;
+	uint16_t start_err, end_err, i, int_nb;
+	volatile union acc_info_ring_data *ring_data;
+	uint16_t info_ring_head = q->d->info_ring_head;
+
+	if (f == NULL) {
+		rte_bbdev_log(ERR, "Invalid file input");
+		return -EINVAL;
+	}
+
+	/* Print generic information on queue status. */
+	fprintf(f, "Dump of operations %s on Queue %d by %s\n",
+			rte_bbdev_op_type_str(q->op_type), queue_id, dev->device->driver->name);
+	fprintf(f, " AQ Enqueued %d Dequeued %d Depth %d - Available Enq %d Deq %d\n",
+			q->aq_enqueued, q->aq_dequeued, q->aq_depth,
+			acc_ring_avail_enq(q), acc_ring_avail_deq(q));
+
+	/* Print information captured in the error buffer. */
+	if (q->error_wrap == 0) {
+		start_err = 0;
+		end_err = q->error_head;
+	} else {
+		start_err = q->error_head;
+		end_err = q->error_head + ACC_MAX_BUFFERLEN;
+	}
+	fprintf(f, "Error Buffer - Head %d Wrap %d\n", q->error_head, q->error_wrap);
+	for (i = start_err; i < end_err; ++i)
+		fprintf(f, " %d\t%s", i, q->error_bufs[i % ACC_MAX_BUFFERLEN]);
+
+	/* Print information captured in the info ring. */
+	if (q->d->info_ring != NULL) {
+		fprintf(f, "Info Ring Buffer - Head %d\n", q->d->info_ring_head);
+		ring_data = q->d->info_ring + (q->d->info_ring_head & ACC_INFO_RING_MASK);
+		while (ring_data->valid) {
+			int_nb = int_from_ring(*ring_data, q->d->device_variant);
+			if ((int_nb < ACC_PF_INT_DMA_DL_DESC_IRQ) || (
+					int_nb > ACC_PF_INT_DMA_MLD_DESC_IRQ)) {
+				fprintf(f, " InfoRing: ITR:%d Info:0x%x",
+						int_nb, ring_data->detailed_info);
+				/* Initialize Info Ring entry and move forward. */
+				ring_data->valid = 0;
+			}
+			info_ring_head++;
+			ring_data = q->d->info_ring + (info_ring_head & ACC_INFO_RING_MASK);
+		}
+	}
+
+	fprintf(f, "Ring Content - Head %d Tail %d Depth %d\n",
+			q->sw_ring_head, q->sw_ring_tail, q->sw_ring_depth);
+	/* Print information about each operation in the software ring. */
+	for (i = 0; i < q->sw_ring_depth; ++i) {
+		op = (q->ring_addr + i)->req.op_addr;
+		if (op != NULL)
+			fprintf(f, " %d\tn %d %s",
+					i, (q->ring_addr + i)->req.numCBs,
+					rte_bbdev_ops_param_string(op, q->op_type));
+	}
+
+	fprintf(f, "== End of File ==\n");
+
+	return 0;
+}
+
 static const struct rte_bbdev_ops vrb_bbdev_ops = {
 	.setup_queues = vrb_setup_queues,
 	.intr_enable = vrb_intr_enable,
@@ -1443,7 +1515,8 @@ static const struct rte_bbdev_ops vrb_bbdev_ops = {
 	.queue_release = vrb_queue_release,
 	.queue_stop = vrb_queue_stop,
 	.queue_intr_enable = vrb_queue_intr_enable,
-	.queue_intr_disable = vrb_queue_intr_disable
+	.queue_intr_disable = vrb_queue_intr_disable,
+	.queue_ops_dump = vrb_queue_ops_dump
 };
 
 /* PCI PF address map. */
@@ -1680,7 +1753,7 @@ vrb_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
 		uint32_t *in_offset, uint32_t *h_out_offset,
 		uint32_t *s_out_offset, uint32_t *h_out_length,
 		uint32_t *s_out_length, uint32_t *mbuf_total_left,
-		uint32_t *seg_total_left, uint8_t r)
+		uint32_t *seg_total_left, uint8_t r, struct acc_queue *q)
 {
 	int next_triplet = 1; /* FCW already done. */
 	uint16_t k;
@@ -1724,8 +1797,8 @@ vrb_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
 	kw = RTE_ALIGN_CEIL(k + 4, 32) * 3;
 
 	if (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < kw))) {
-		rte_bbdev_log(ERR,
-				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
+		acc_error_log(q, (void *)op,
+				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\n",
 				*mbuf_total_left, kw);
 		return -1;
 	}
@@ -1735,8 +1808,8 @@ vrb_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
 			check_bit(op->turbo_dec.op_flags,
 			RTE_BBDEV_TURBO_DEC_SCATTER_GATHER));
 	if (unlikely(next_triplet < 0)) {
-		rte_bbdev_log(ERR,
-				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
+		acc_error_log(q, (void *)op,
+				"Mismatch between data to process and mbuf data length in bbdev_op: %p\n",
 				op);
 		return -1;
 	}
@@ -1748,7 +1821,7 @@ vrb_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
 			desc, h_output, *h_out_offset, *h_out_length,
 			next_triplet, ACC_DMA_BLKID_OUT_HARD);
 	if (unlikely(next_triplet < 0)) {
-		rte_bbdev_log(ERR,
+		acc_error_log(q, (void *)op,
 				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
 				op);
 		return -1;
@@ -1760,7 +1833,7 @@ vrb_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
 	/* Soft output. */
 	if (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
 		if (op->turbo_dec.soft_output.data == 0) {
-			rte_bbdev_log(ERR, "Soft output is not defined");
+			acc_error_log(q, (void *)op, "Soft output is not defined\n");
 			return -1;
 		}
 		if (check_bit(op->turbo_dec.op_flags,
@@ -1773,8 +1846,8 @@ vrb_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
 				*s_out_offset, *s_out_length, next_triplet,
 				ACC_DMA_BLKID_OUT_SOFT);
 		if (unlikely(next_triplet < 0)) {
-			rte_bbdev_log(ERR,
-					"Mismatch between data to process and mbuf data length in bbdev_op: %p",
+			acc_error_log(q, (void *)op,
+					"Mismatch between data to process and mbuf data length in bbdev_op: %p\n",
 					op);
 			return -1;
 		}
@@ -1797,7 +1870,8 @@ vrb_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
 		struct rte_mbuf **input, struct rte_mbuf *h_output,
 		uint32_t *in_offset, uint32_t *h_out_offset,
 		uint32_t *h_out_length, uint32_t *mbuf_total_left,
-		uint32_t *seg_total_left, struct acc_fcw_ld *fcw, uint16_t device_variant)
+		uint32_t *seg_total_left, struct acc_fcw_ld *fcw, uint16_t device_variant,
+		struct acc_queue *q)
 {
 	struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
 	int next_triplet = 1; /* FCW already done. */
@@ -1808,8 +1882,8 @@ vrb_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
 	if (device_variant == VRB1_VARIANT) {
 		if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HARQ_4BIT_COMPRESSION) ||
 				check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_SOFT_OUT_ENABLE)) {
-			rte_bbdev_log(ERR,
-					"VRB1 does not support the requested capabilities %x",
+			acc_error_log(q, (void *)op,
+					"VRB1 does not support the requested capabilities %x\n",
 					op->ldpc_dec.op_flags);
 			return -1;
 		}
@@ -1829,8 +1903,8 @@ vrb_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
 	output_length = K - dec->n_filler - crc24_overlap;
 
 	if (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < input_length))) {
-		rte_bbdev_log(ERR,
-				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
+		acc_error_log(q, (void *)op,
+				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\n",
 				*mbuf_total_left, input_length);
 		return -1;
 	}
@@ -1842,15 +1916,15 @@ vrb_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
 			RTE_BBDEV_LDPC_DEC_SCATTER_GATHER));
 
 	if (unlikely(next_triplet < 0)) {
-		rte_bbdev_log(ERR,
-				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
+		acc_error_log(q, (void *)op,
+				"Mismatch between data to process and mbuf data length in bbdev_op: %p\n",
 				op);
 		return -1;
 	}
 
 	if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
 		if (op->ldpc_dec.harq_combined_input.data == 0) {
-			rte_bbdev_log(ERR, "HARQ input is not defined");
+			acc_error_log(q, (void *)op, "HARQ input is not defined\n");
 			return -1;
 		}
 		h_p_size = fcw->hcin_size0 + fcw->hcin_size1;
@@ -1859,7 +1933,7 @@ vrb_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
 		else if (fcw->hcin_decomp_mode == 4)
 			h_p_size = h_p_size / 2;
 		if (op->ldpc_dec.harq_combined_input.data == 0) {
-			rte_bbdev_log(ERR, "HARQ input is not defined");
+			acc_error_log(q, (void *)op, "HARQ input is not defined\n");
 			return -1;
 		}
 		acc_dma_fill_blk_type(
@@ -1882,7 +1956,7 @@ vrb_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
 
 	if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_SOFT_OUT_ENABLE)) {
 		if (op->ldpc_dec.soft_output.data == 0) {
-			rte_bbdev_log(ERR, "Soft output is not defined");
+			acc_error_log(q, (void *)op, "Soft output is not defined\n");
 			return -1;
 		}
 		dec->soft_output.length = fcw->rm_e;
@@ -1894,7 +1968,7 @@ vrb_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
 
 	if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
 		if (op->ldpc_dec.harq_combined_output.data == 0) {
-			rte_bbdev_log(ERR, "HARQ output is not defined");
+			acc_error_log(q, (void *)op, "HARQ output is not defined\n");
 			return -1;
 		}
 
@@ -2326,8 +2400,8 @@ vrb2_enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op
 				in_length_in_bytes, &seg_total_left, next_triplet,
 				check_bit(enc->op_flags, RTE_BBDEV_LDPC_ENC_SCATTER_GATHER));
 		if (unlikely(next_triplet < 0)) {
-			rte_bbdev_log(ERR,
-					"Mismatch between data to process and mbuf data length in bbdev_op: %p",
+			acc_error_log(q, (void *)op,
+					"Mismatch between data to process and mbuf data length in bbdev_op: %p\n",
 					op);
 			return -1;
 		}
@@ -2399,7 +2473,7 @@ enqueue_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
 	ret = vrb_dma_desc_td_fill(op, &desc->req, &input, h_output, s_output,
 			&in_offset, &h_out_offset, &s_out_offset,
 			&h_out_length, &s_out_length, &mbuf_total_left,
-			&seg_total_left, 0);
+			&seg_total_left, 0, q);
 
 	if (unlikely(ret < 0))
 		return ret;
@@ -2478,7 +2552,7 @@ vrb_enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
 		ret = vrb_dma_desc_ld_fill(op, &desc->req, &input, h_output,
 				&in_offset, &h_out_offset,
 				&h_out_length, &mbuf_total_left,
-				&seg_total_left, fcw, q->d->device_variant);
+				&seg_total_left, fcw, q->d->device_variant, q);
 		if (unlikely(ret < 0))
 			return ret;
 	}
@@ -2572,7 +2646,7 @@ vrb_enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
 				h_output, &in_offset, &h_out_offset,
 				&h_out_length, &mbuf_total_left,
 				&seg_total_left,
-				&desc->req.fcw_ld, q->d->device_variant);
+				&desc->req.fcw_ld, q->d->device_variant, q);
 
 		if (unlikely(ret < 0))
 			return ret;
@@ -2658,7 +2732,7 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
 		ret = vrb_dma_desc_td_fill(op, &desc->req, &input,
 				h_output, s_output, &in_offset, &h_out_offset,
 				&s_out_offset, &h_out_length, &s_out_length,
-				&mbuf_total_left, &seg_total_left, r);
+				&mbuf_total_left, &seg_total_left, r, q);
 
 		if (unlikely(ret < 0))
 			return ret;
-- 
2.34.1