From: Soumyadeep Hore <soumyadeep.hore@intel.com>
To: bruce.richardson@intel.com, anatoly.burakov@intel.com
Cc: dev@dpdk.org
Subject: [PATCH v2 13/21] common/idpf: avoid variable 0-init
Date: Tue, 4 Jun 2024 08:06:03 +0000
Message-ID: <20240604080611.2197835-14-soumyadeep.hore@intel.com>
In-Reply-To: <20240604080611.2197835-1-soumyadeep.hore@intel.com>
References: <20240528072839.867100-1-soumyadeep.hore@intel.com>
 <20240604080611.2197835-1-soumyadeep.hore@intel.com>

Don't initialize variables when not needed. Also use 'err' instead of
'status', 'ret_code', 'ret', etc. for consistency, and rename the return
label 'sq_send_command_out' to 'err_unlock'.
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
 drivers/common/idpf/base/idpf_controlq.c       | 63 +++++++++----------
 .../common/idpf/base/idpf_controlq_setup.c     | 18 +++---
 2 files changed, 39 insertions(+), 42 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_controlq.c b/drivers/common/idpf/base/idpf_controlq.c
index b5ba9c3bd0..bd23e54421 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -61,7 +61,7 @@ static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
  */
 static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
 {
-	int i = 0;
+	int i;
 
 	for (i = 0; i < cq->ring_size; i++) {
 		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
@@ -134,7 +134,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 {
 	struct idpf_ctlq_info *cq;
 	bool is_rxq = false;
-	int status = 0;
+	int err;
 
 	if (!qinfo->len || !qinfo->buf_size ||
 	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
@@ -164,16 +164,16 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 		is_rxq = true;
 		/* fallthrough */
 	case IDPF_CTLQ_TYPE_MAILBOX_TX:
-		status = idpf_ctlq_alloc_ring_res(hw, cq);
+		err = idpf_ctlq_alloc_ring_res(hw, cq);
 		break;
 	default:
-		status = -EINVAL;
+		err = -EINVAL;
 		break;
 	}
 
-	if (status)
+	if (err)
 #ifdef NVME_CPF
-		return status;
+		return err;
 #else
 		goto init_free_q;
 #endif
@@ -187,7 +187,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 		idpf_calloc(hw, qinfo->len,
 			    sizeof(struct idpf_ctlq_msg *));
 	if (!cq->bi.tx_msg) {
-		status = -ENOMEM;
+		err = -ENOMEM;
 		goto init_dealloc_q_mem;
 	}
 #endif
@@ -203,17 +203,16 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 
 #ifndef NVME_CPF
 	*cq_out = cq;
-	return status;
+	return 0;
 
 init_dealloc_q_mem:
 	/* free ring buffers and the ring itself */
 	idpf_ctlq_dealloc_ring_res(hw, cq);
 init_free_q:
 	idpf_free(hw, cq);
-	cq = NULL;
 #endif
 
-	return status;
+	return err;
 }
 
 /**
@@ -249,8 +248,8 @@ int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
 #endif
 {
 	struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
-	int ret_code = 0;
-	int i = 0;
+	int err;
+	int i;
 
 	LIST_INIT(&hw->cq_list_head);
 
@@ -261,19 +260,19 @@ int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
 		cq = *(ctlq + i);
 #endif
 
-		ret_code = idpf_ctlq_add(hw, qinfo, &cq);
-		if (ret_code)
+		err = idpf_ctlq_add(hw, qinfo, &cq);
+		if (err)
 			goto init_destroy_qs;
 	}
 
-	return ret_code;
+	return 0;
 
 init_destroy_qs:
 	LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
 				 idpf_ctlq_info, cq_list)
 		idpf_ctlq_remove(hw, cq);
 
-	return ret_code;
+	return err;
 }
 
 /**
@@ -307,9 +306,9 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
 {
 	struct idpf_ctlq_desc *desc;
-	int num_desc_avail = 0;
-	int status = 0;
-	int i = 0;
+	int num_desc_avail;
+	int err = 0;
+	int i;
 
 	if (!cq || !cq->ring_size)
 		return -ENOBUFS;
@@ -319,8 +318,8 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	/* Ensure there are enough descriptors to send all messages */
 	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
 	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
-		status = -ENOSPC;
-		goto sq_send_command_out;
+		err = -ENOSPC;
+		goto err_unlock;
 	}
 
 	for (i = 0; i < num_q_msg; i++) {
@@ -391,10 +390,10 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 
 	wr32(hw, cq->reg.tail, cq->next_to_use);
 
-sq_send_command_out:
+err_unlock:
 	idpf_release_lock(&cq->cq_lock);
 
-	return status;
+	return err;
 }
 
 /**
@@ -418,9 +417,8 @@ static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 				struct idpf_ctlq_msg *msg_status[], bool force)
 {
 	struct idpf_ctlq_desc *desc;
-	u16 i = 0, num_to_clean;
+	u16 i, num_to_clean;
 	u16 ntc, desc_err;
-	int ret = 0;
 
 	if (!cq || !cq->ring_size)
 		return -ENOBUFS;
@@ -467,7 +465,7 @@ static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 	/* Return number of descriptors actually cleaned */
 	*clean_count = i;
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -534,7 +532,6 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	u16 ntp = cq->next_to_post;
 	bool buffs_avail = false;
 	u16 tbp = ntp + 1;
-	int status = 0;
 	int i = 0;
 	if (*buff_count > cq->ring_size)
 		return -ENOBUFS;
@@ -635,7 +632,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	/* return the number of buffers that were not posted */
 	*buff_count = *buff_count - i;
 
-	return status;
+	return 0;
 }
 
 /**
@@ -654,8 +651,8 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 {
 	u16 num_to_clean, ntc, ret_val, flags;
 	struct idpf_ctlq_desc *desc;
-	int ret_code = 0;
-	u16 i = 0;
+	int err = 0;
+	u16 i;
 
 	if (!cq || !cq->ring_size)
 		return -ENOBUFS;
@@ -688,7 +685,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 				      IDPF_CTLQ_FLAG_FTYPE_S;
 
 		if (flags & IDPF_CTLQ_FLAG_ERR)
-			ret_code = -EBADMSG;
+			err = -EBADMSG;
 
 		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
 		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
@@ -734,7 +731,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 	*num_q_msg = i;
 
 	if (*num_q_msg == 0)
-		ret_code = -ENOMSG;
+		err = -ENOMSG;
 
-	return ret_code;
+	return err;
 }
diff --git a/drivers/common/idpf/base/idpf_controlq_setup.c b/drivers/common/idpf/base/idpf_controlq_setup.c
index 21f43c74f5..cd6bcb1cf0 100644
--- a/drivers/common/idpf/base/idpf_controlq_setup.c
+++ b/drivers/common/idpf/base/idpf_controlq_setup.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2023 Intel Corporation
+ * Copyright(c) 2001-2024 Intel Corporation
  */
 
 
@@ -34,7 +34,7 @@ static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
 static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
 				struct idpf_ctlq_info *cq)
 {
-	int i = 0;
+	int i;
 
 	/* Do not allocate DMA buffers for transmit queues */
 	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
@@ -153,20 +153,20 @@ void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
  */
 int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
-	int ret_code;
+	int err;
 
 	/* verify input for valid configuration */
 	if (!cq->ring_size || !cq->buf_size)
 		return -EINVAL;
 
 	/* allocate the ring memory */
-	ret_code = idpf_ctlq_alloc_desc_ring(hw, cq);
-	if (ret_code)
-		return ret_code;
+	err = idpf_ctlq_alloc_desc_ring(hw, cq);
+	if (err)
+		return err;
 
 	/* allocate buffers in the rings */
-	ret_code = idpf_ctlq_alloc_bufs(hw, cq);
-	if (ret_code)
+	err = idpf_ctlq_alloc_bufs(hw, cq);
+	if (err)
 		goto idpf_init_cq_free_ring;
 
 	/* success! */
@@ -174,5 +174,5 @@ int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 
 idpf_init_cq_free_ring:
 	idpf_free_dma_mem(hw, &cq->desc_ring);
-	return ret_code;
+	return err;
 }
-- 
2.43.0