From: Andre Muezerie <andremue@linux.microsoft.com>
To: Ian Stokes, Bruce Richardson, Vladimir Medvedkin, Anatoly Burakov, Jochen Behrens
Cc: dev@dpdk.org, Andre Muezerie
Subject: [PATCH] drivers_net: use 64-bit shift and avoid signed/unsigned mismatch
Date: Thu, 26 Dec 2024 12:59:30 -0800
Message-Id: <1735246770-731-1-git-send-email-andremue@linux.microsoft.com>
X-Mailer: git-send-email 1.8.3.1

This patch avoids warnings like the ones below emitted by MSVC:

1) ../drivers/net/ice/base/ice_flg_rd.c(71): warning C4334: '<<': result of
   32-bit shift implicitly converted to 64 bits (was 64-bit shift intended?)

2) ../drivers/net/ice/ice_dcf_sched.c(177): warning C4018: '>=':
   signed/unsigned mismatch

The fix for (1) is to use 64-bit shifting when appropriate (according to what
the result is used for).

The fix for (2) is to explicitly cast the variables used in the comparison.
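For reference, below is a minimal standalone sketch of both warning patterns
and the style of fix used throughout the patch. It is illustrative only and
not part of the diff; struct example_hw and example_fixes() are made-up names,
while RTE_BIT64() is the existing DPDK helper from rte_bitops.h.

#include <stdint.h>
#include <rte_bitops.h> /* RTE_BIT64() */

/* Made-up device struct; num_vfs is 16-bit, similar to the DCF scheduler case. */
struct example_hw {
	uint16_t num_vfs;
};

static uint64_t
example_fixes(const struct example_hw *hw, uint32_t node_id, unsigned int bit)
{
	/* C4334: "1u << bit" would be computed in 32 bits and only then
	 * widened to 64 bits, silently dropping bits 32..63.  RTE_BIT64()
	 * expands to UINT64_C(1) << (bit), so the shift is done in 64 bits.
	 */
	uint64_t mask = RTE_BIT64(bit);

	/* C4018: "8 * hw->num_vfs" is promoted to signed int, so comparing
	 * it against the unsigned node_id mixes signedness.  An explicit
	 * cast makes both operands unsigned.
	 */
	if (node_id >= (uint32_t)(8 * hw->num_vfs))
		mask = 0;

	return mask;
}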
Signed-off-by: Andre Muezerie <andremue@linux.microsoft.com>
---
 drivers/net/i40e/i40e_ethdev.c       | 22 +++++++++++-----------
 drivers/net/iavf/iavf_ethdev.c       |  2 +-
 drivers/net/iavf/iavf_rxtx.c         |  2 +-
 drivers/net/iavf/iavf_vchnl.c        |  2 +-
 drivers/net/ice/base/ice_flg_rd.c    |  4 ++--
 drivers/net/ice/base/ice_parser_rt.c | 16 ++++++++--------
 drivers/net/ice/base/ice_xlt_kb.c    |  2 +-
 drivers/net/ice/ice_dcf_sched.c      |  2 +-
 drivers/net/ice/ice_ethdev.c         |  4 ++--
 drivers/net/ice/ice_rxtx.c           |  2 +-
 drivers/net/ixgbe/ixgbe_ethdev.c     |  2 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.h |  6 +++---
 12 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 30dcdc68a8..e565c953e2 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3094,7 +3094,7 @@ i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
 	/* enlarge the limitation when statistics counters overflowed */
 	if (offset_loaded) {
 		if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
-			*stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
+			*stat += RTE_BIT64(I40E_48_BIT_WIDTH);
 		*stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
 	}
 	*prev_stat = *stat;
@@ -4494,7 +4494,7 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
 	pool_sel = dev->data->mac_pool_sel[index];
 
 	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
-		if (pool_sel & (1ULL << i)) {
+		if (pool_sel & RTE_BIT64(i)) {
 			if (i == 0)
 				vsi = pf->main_vsi;
 			else {
@@ -4630,7 +4630,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	for (i = 0; i < reta_size; i++) {
 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
-		if (reta_conf[idx].mask & (1ULL << shift))
+		if (reta_conf[idx].mask & RTE_BIT64(shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
 	ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
@@ -4674,7 +4674,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	for (i = 0; i < reta_size; i++) {
 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
-		if (reta_conf[idx].mask & (1ULL << shift))
+		if (reta_conf[idx].mask & RTE_BIT64(shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
 
@@ -6712,7 +6712,7 @@ i40e_vmdq_setup(struct rte_eth_dev *dev)
 	loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
 	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
 		for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
-			if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
+			if (vmdq_conf->pool_map[i].pools & RTE_BIT64(j)) {
 				PMD_INIT_LOG(INFO,
 					"Add vlan %u to vmdq pool %u",
 					vmdq_conf->pool_map[i].vlan_id, j);
@@ -6761,7 +6761,7 @@ i40e_stat_update_32(struct i40e_hw *hw,
 		*stat = (uint64_t)(new_data - *offset);
 	else
 		*stat = (uint64_t)((new_data +
-			((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
+			RTE_BIT64(I40E_32_BIT_WIDTH)) - *offset);
 }
 
 static void
@@ -6789,7 +6789,7 @@ i40e_stat_update_48(struct i40e_hw *hw,
 		*stat = new_data - *offset;
 	else
 		*stat = (uint64_t)((new_data +
-			((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
+			RTE_BIT64(I40E_48_BIT_WIDTH)) - *offset);
 
 	*stat &= I40E_48_BIT_MASK;
 }
@@ -7730,7 +7730,7 @@ i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
 		return hena;
 
 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
-		if (flags & (1ULL << i))
+		if (flags & RTE_BIT64(i))
 			hena |= adapter->pctypes_tbl[i];
 	}
 
@@ -7749,7 +7749,7 @@ i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
 
 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
 		if (flags & adapter->pctypes_tbl[i])
-			rss_hf |= (1ULL << i);
+			rss_hf |= RTE_BIT64(i);
 	}
 	return rss_hf;
 }
@@ -10162,7 +10162,7 @@ i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
 	if (flow_type < I40E_FLOW_TYPE_MAX) {
 		pctype_mask = adapter->pctypes_tbl[flow_type];
 		for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
-			if (pctype_mask & (1ULL << i))
+			if (pctype_mask & RTE_BIT64(i))
 				return (enum i40e_filter_pctype)i;
 		}
 	}
@@ -10174,7 +10174,7 @@ i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
 			enum i40e_filter_pctype pctype)
 {
 	uint16_t flowtype;
-	uint64_t pctype_mask = 1ULL << pctype;
+	uint64_t pctype_mask = RTE_BIT64(pctype);
 
 	for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
 	     flowtype++) {
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 7f80cd6258..b1469c5998 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2485,7 +2485,7 @@ iavf_init_proto_xtr(struct rte_eth_dev *dev)
 		if (!xtr_ol->required)
 			continue;
 
-		if (!(vf->supported_rxdid & BIT(rxdid))) {
+		if (!(vf->supported_rxdid & RTE_BIT64(rxdid))) {
 			PMD_DRV_LOG(ERR,
 				    "rxdid[%u] is not supported in hardware",
 				    rxdid);
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 6a093c6746..79ec76697c 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3915,7 +3915,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d] is legacy, "
 				    "set rx_pkt_burst as legacy for all queues", rxq->rxdid, i);
 			use_flex = false;
-		} else if (!(vf->supported_rxdid & BIT(rxq->rxdid))) {
+		} else if (!(vf->supported_rxdid & RTE_BIT64(rxq->rxdid))) {
 			PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d] is not supported, "
 				    "set rx_pkt_burst as legacy for all queues", rxq->rxdid, i);
 			use_flex = false;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 065ab3594c..6aeb36946b 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1265,7 +1265,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
-			if (vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
+			if (vf->supported_rxdid & RTE_BIT64(rxq[i]->rxdid)) {
 				vc_qp->rxq.rxdid = rxq[i]->rxdid;
 				PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
 					    vc_qp->rxq.rxdid, i);
diff --git a/drivers/net/ice/base/ice_flg_rd.c b/drivers/net/ice/base/ice_flg_rd.c
index 97f62e6cc4..837f465978 100644
--- a/drivers/net/ice/base/ice_flg_rd.c
+++ b/drivers/net/ice/base/ice_flg_rd.c
@@ -68,8 +68,8 @@ u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg)
 		if (!item->expose)
 			continue;
 
-		if (psr_flg & (1ul << item->intr_flg_id))
-			flg |= (1ul << i);
+		if (psr_flg & RTE_BIT64(item->intr_flg_id))
+			flg |= RTE_BIT64(i);
 	}
 
 	return flg;
diff --git a/drivers/net/ice/base/ice_parser_rt.c b/drivers/net/ice/base/ice_parser_rt.c
index 09aac0c6e6..a96e8b5623 100644
--- a/drivers/net/ice/base/ice_parser_rt.c
+++ b/drivers/net/ice/base/ice_parser_rt.c
@@ -90,7 +90,7 @@ void ice_parser_rt_reset(struct ice_parser_rt *rt)
 	rt->psr = psr;
 
 	for (i = 0; i < 64; i++) {
-		if ((mi->flags & (1ul << i)) != 0ul)
+		if ((mi->flags & RTE_BIT64(i)) != 0ul)
 			_rt_flag_set(rt, i, true);
 	}
 }
@@ -399,11 +399,11 @@ static void _pg_exe(struct ice_parser_rt *rt)
 
 static void _flg_add(struct ice_parser_rt *rt, int idx, bool val)
 {
-	rt->pu.flg_msk |= (1ul << idx);
+	rt->pu.flg_msk |= RTE_BIT64(idx);
 	if (val)
-		rt->pu.flg_val |= (1ul << idx);
+		rt->pu.flg_val |= RTE_BIT64(idx);
 	else
-		rt->pu.flg_val &= ~(1ul << idx);
+		rt->pu.flg_val &= ~RTE_BIT64(idx);
 
 	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for flag %d value %d\n",
 		  idx, val);
@@ -468,9 +468,9 @@ static void _err_add(struct ice_parser_rt *rt, int idx, bool val)
 {
 	rt->pu.err_msk |= (u16)(1 << idx);
 	if (val)
-		rt->pu.flg_val |= (1ULL << idx);
+		rt->pu.flg_val |= RTE_BIT64(idx);
 	else
-		rt->pu.flg_val &= ~(1ULL << idx);
+		rt->pu.flg_val &= ~RTE_BIT64(idx);
 
 	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for error %d value %d\n",
 		  idx, val);
@@ -600,8 +600,8 @@ static void _pu_exe(struct ice_parser_rt *rt)
 	}
 
 	for (i = 0; i < 64; i++) {
-		if (pu->flg_msk & (1ul << i))
-			_rt_flag_set(rt, i, pu->flg_val & (1ul << i));
+		if (pu->flg_msk & RTE_BIT64(i))
+			_rt_flag_set(rt, i, pu->flg_val & RTE_BIT64(i));
 	}
 
 	for (i = 0; i < 16; i++) {
diff --git a/drivers/net/ice/base/ice_xlt_kb.c b/drivers/net/ice/base/ice_xlt_kb.c
index 0d5a510384..112e568e1f 100644
--- a/drivers/net/ice/base/ice_xlt_kb.c
+++ b/drivers/net/ice/base/ice_xlt_kb.c
@@ -208,7 +208,7 @@ u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag)
 		/* only check first entry */
 		u16 idx = (u16)(entry->flg0_14_sel[i] & 0x3f);
 
-		if (pkt_flag & (1ul << idx))
+		if (pkt_flag & RTE_BIT64(idx))
 			flg |= (u16)(1u << i);
 	}
 
diff --git a/drivers/net/ice/ice_dcf_sched.c b/drivers/net/ice/ice_dcf_sched.c
index 7967c35533..2832d223d1 100644
--- a/drivers/net/ice/ice_dcf_sched.c
+++ b/drivers/net/ice/ice_dcf_sched.c
@@ -174,7 +174,7 @@ ice_dcf_node_param_check(struct ice_dcf_hw *hw, uint32_t node_id,
 	}
 
 	/* for non-leaf node */
-	if (node_id >= 8 * hw->num_vfs) {
+	if (node_id >= (uint32_t)(8 * hw->num_vfs)) {
 		if (params->nonleaf.wfq_weight_mode) {
 			error->type =
 				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 93a6308a86..c3162741c3 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2469,13 +2469,13 @@ ice_get_supported_rxdid(struct ice_hw *hw)
 	uint32_t regval;
 	int i;
 
-	supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
+	supported_rxdid |= RTE_BIT64(ICE_RXDID_LEGACY_1);
 
 	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
 		regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
 		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
 			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
-			supported_rxdid |= BIT(i);
+			supported_rxdid |= RTE_BIT64(i);
 	}
 	return supported_rxdid;
 }
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 0c7106c7e0..6abcbaad22 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -399,7 +399,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
 		    rxq->port_id, rxq->queue_id, rxdid);
 
-	if (!(pf->supported_rxdid & BIT(rxdid))) {
+	if (!(pf->supported_rxdid & RTE_BIT64(rxdid))) {
 		PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
 			    rxdid);
 		return -EINVAL;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 8bee97d191..d7108d5c80 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2722,7 +2722,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 				ixgbe_set_vf_rate_limit(
 					dev, vf,
 					vfinfo[vf].tx_rate[idx],
-					1 << idx);
+					RTE_BIT64(idx));
 	}
 
 	ixgbe_restore_statistics_mapping(dev);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index e9ded6663d..e59cb285f4 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -186,14 +186,14 @@ static inline uint8_t
 vmxnet3_get_ring_idx(struct vmxnet3_hw *hw, uint32 rqID)
 {
 	return (rqID >= hw->num_rx_queues &&
-		rqID < 2 * hw->num_rx_queues) ? 1 : 0;
+		rqID < (uint32)2 * hw->num_rx_queues) ? 1 : 0;
 }
 
 static inline bool
 vmxnet3_rx_data_ring(struct vmxnet3_hw *hw, uint32 rqID)
 {
-	return (rqID >= 2 * hw->num_rx_queues &&
-		rqID < 3 * hw->num_rx_queues);
+	return (rqID >= (uint32)2 * hw->num_rx_queues &&
+		rqID < (uint32)3 * hw->num_rx_queues);
 }
 
 /*
-- 
2.47.0.vfs.0.3