From mboxrd@z Thu Jan 1 00:00:00 1970
From: Soumyadeep Hore <soumyadeep.hore@intel.com>
To: bruce.richardson@intel.com, anatoly.burakov@intel.com
Cc: dev@dpdk.org
Subject: [PATCH v4 12/21] common/idpf: avoid variable 0-init
Date: Tue, 18 Jun 2024 10:57:13 +0000
Message-ID: <20240618105722.2326987-13-soumyadeep.hore@intel.com>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240618105722.2326987-1-soumyadeep.hore@intel.com>
References: <20240612035257.2245824-11-soumyadeep.hore@intel.com>
 <20240618105722.2326987-1-soumyadeep.hore@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Avoid initializing variables that are assigned before their first
use. Also, for consistency, use 'err' in place of 'status',
'ret_code' and 'ret', and rename the return label
'sq_send_command_out' to 'err_unlock'.
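To illustrate the rule being applied (a minimal hypothetical sketch,
not code from the driver): the initializer is dropped when every path
assigns the variable before its first use, and '= 0' is kept where a
path may return the variable without assigning it, as idpf_ctlq_send()
and idpf_ctlq_recv() still do with 'err'.

	/* Hypothetical sketch of the pattern; not driver code. */
	static int example(const int *vals, int n)
	{
		int i;		/* assigned by the loop before use: no '= 0' */
		int err = 0;	/* may be returned unassigned: keep '= 0' */

		for (i = 0; i < n; i++) {
			if (vals[i] < 0)
				err = -1;	/* only some paths assign err */
		}

		return err;
	}
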
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
 drivers/common/idpf/base/idpf_controlq.c      | 60 +++++++++----------
 .../common/idpf/base/idpf_controlq_setup.c    | 18 +++---
 2 files changed, 38 insertions(+), 40 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_controlq.c b/drivers/common/idpf/base/idpf_controlq.c
index d9ca33cdb9..65e5599614 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -61,7 +61,7 @@ static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
  */
 static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
 {
-	int i = 0;
+	int i;
 
 	for (i = 0; i < cq->ring_size; i++) {
 		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
@@ -134,7 +134,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 {
 	struct idpf_ctlq_info *cq;
 	bool is_rxq = false;
-	int status = 0;
+	int err;
 
 	if (!qinfo->len || !qinfo->buf_size ||
 	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
@@ -160,14 +160,14 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 		is_rxq = true;
 		/* fallthrough */
 	case IDPF_CTLQ_TYPE_MAILBOX_TX:
-		status = idpf_ctlq_alloc_ring_res(hw, cq);
+		err = idpf_ctlq_alloc_ring_res(hw, cq);
 		break;
 	default:
-		status = -EINVAL;
+		err = -EINVAL;
 		break;
 	}
 
-	if (status)
+	if (err)
 		goto init_free_q;
 
 	if (is_rxq) {
@@ -178,7 +178,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 			idpf_calloc(hw, qinfo->len,
 				    sizeof(struct idpf_ctlq_msg *));
 		if (!cq->bi.tx_msg) {
-			status = -ENOMEM;
+			err = -ENOMEM;
 			goto init_dealloc_q_mem;
 		}
 	}
@@ -192,7 +192,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
 
 	*cq_out = cq;
-	return status;
+	return 0;
 
 init_dealloc_q_mem:
 	/* free ring buffers and the ring itself */
@@ -201,7 +201,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 	idpf_free(hw, cq);
 	cq = NULL;
 
-	return status;
+	return err;
 }
 
 /**
@@ -232,27 +232,27 @@ int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
 		   struct idpf_ctlq_create_info *q_info)
 {
 	struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
-	int ret_code = 0;
-	int i = 0;
+	int err;
+	int i;
 
 	LIST_INIT(&hw->cq_list_head);
 
 	for (i = 0; i < num_q; i++) {
 		struct idpf_ctlq_create_info *qinfo = q_info + i;
 
-		ret_code = idpf_ctlq_add(hw, qinfo, &cq);
-		if (ret_code)
+		err = idpf_ctlq_add(hw, qinfo, &cq);
+		if (err)
 			goto init_destroy_qs;
 	}
 
-	return ret_code;
+	return 0;
 
 init_destroy_qs:
 	LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
 				 idpf_ctlq_info, cq_list)
 		idpf_ctlq_remove(hw, cq);
 
-	return ret_code;
+	return err;
 }
 
 /**
@@ -286,9 +286,9 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
 {
 	struct idpf_ctlq_desc *desc;
-	int num_desc_avail = 0;
-	int status = 0;
-	int i = 0;
+	int num_desc_avail;
+	int err = 0;
+	int i;
 
 	if (!cq || !cq->ring_size)
 		return -ENOBUFS;
@@ -298,8 +298,8 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	/* Ensure there are enough descriptors to send all messages */
 	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
 	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
-		status = -ENOSPC;
-		goto sq_send_command_out;
+		err = -ENOSPC;
+		goto err_unlock;
 	}
 
 	for (i = 0; i < num_q_msg; i++) {
@@ -370,10 +370,10 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 
 	wr32(hw, cq->reg.tail, cq->next_to_use);
 
-sq_send_command_out:
+err_unlock:
 	idpf_release_lock(&cq->cq_lock);
 
-	return status;
+	return err;
 }
 
 /**
@@ -397,9 +397,8 @@ static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 			  struct idpf_ctlq_msg *msg_status[], bool force)
 {
 	struct idpf_ctlq_desc *desc;
-	u16 i = 0, num_to_clean;
+	u16 i, num_to_clean;
 	u16 ntc, desc_err;
-	int ret = 0;
 
 	if (!cq || !cq->ring_size)
 		return -ENOBUFS;
@@ -446,7 +445,7 @@ static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 	/* Return number of descriptors actually cleaned */
 	*clean_count = i;
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -513,7 +512,6 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	u16 ntp = cq->next_to_post;
 	bool buffs_avail = false;
 	u16 tbp = ntp + 1;
-	int status = 0;
 	int i = 0;
 
 	if (*buff_count > cq->ring_size)
@@ -614,7 +612,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	/* return the number of buffers that were not posted */
 	*buff_count = *buff_count - i;
 
-	return status;
+	return 0;
 }
 
 /**
@@ -633,8 +631,8 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 {
 	u16 num_to_clean, ntc, ret_val, flags;
 	struct idpf_ctlq_desc *desc;
-	int ret_code = 0;
-	u16 i = 0;
+	int err = 0;
+	u16 i;
 
 	if (!cq || !cq->ring_size)
 		return -ENOBUFS;
@@ -667,7 +665,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 			     IDPF_CTLQ_FLAG_FTYPE_S;
 
 		if (flags & IDPF_CTLQ_FLAG_ERR)
-			ret_code = -EBADMSG;
+			err = -EBADMSG;
 
 		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
 		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
@@ -713,7 +711,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 
 	*num_q_msg = i;
 	if (*num_q_msg == 0)
-		ret_code = -ENOMSG;
+		err = -ENOMSG;
 
-	return ret_code;
+	return err;
 }
diff --git a/drivers/common/idpf/base/idpf_controlq_setup.c b/drivers/common/idpf/base/idpf_controlq_setup.c
index 21f43c74f5..cd6bcb1cf0 100644
--- a/drivers/common/idpf/base/idpf_controlq_setup.c
+++ b/drivers/common/idpf/base/idpf_controlq_setup.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2023 Intel Corporation
+ * Copyright(c) 2001-2024 Intel Corporation
  */
 
 
@@ -34,7 +34,7 @@ static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
 static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
 				struct idpf_ctlq_info *cq)
 {
-	int i = 0;
+	int i;
 
 	/* Do not allocate DMA buffers for transmit queues */
 	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
@@ -153,20 +153,20 @@ void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
  */
 int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
-	int ret_code;
+	int err;
 
 	/* verify input for valid configuration */
 	if (!cq->ring_size || !cq->buf_size)
 		return -EINVAL;
 
 	/* allocate the ring memory */
-	ret_code = idpf_ctlq_alloc_desc_ring(hw, cq);
-	if (ret_code)
-		return ret_code;
+	err = idpf_ctlq_alloc_desc_ring(hw, cq);
+	if (err)
+		return err;
 
 	/* allocate buffers in the rings */
-	ret_code = idpf_ctlq_alloc_bufs(hw, cq);
-	if (ret_code)
+	err = idpf_ctlq_alloc_bufs(hw, cq);
+	if (err)
 		goto idpf_init_cq_free_ring;
 
 	/* success! */
@@ -174,5 +174,5 @@ int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 
 idpf_init_cq_free_ring:
 	idpf_free_dma_mem(hw, &cq->desc_ring);
-	return ret_code;
+	return err;
 }
-- 
2.43.0