From: Shahaf Shuler <shahafs@mellanox.com>
To: bluca@debian.org
Cc: stable@dpdk.org, nelio.laranjeiro@6wind.com, adrien.mazarguil@6wind.com, yskoh@mellanox.com
Date: Wed, 9 May 2018 18:03:55 +0300
Message-Id: <1ce27a493b31ef636724e8eb23c85a72a12f21c5.1525878118.git.shahafs@mellanox.com>
Subject: [dpdk-stable] [PATCH v2 03/20] net/mlx5: mark parameters with unused attribute

From: Nélio Laranjeiro <nelio.laranjeiro@6wind.com>

[ upstream commit 56f08e1671f99a15cf6b00027c2f7d81d69c4f5f ]

Replaces all (void)foo; casts with the __rte_unused macro, except when
variables are under #if statements.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/mlx5.c         |  4 +--
 drivers/net/mlx5/mlx5_ethdev.c  | 18 +++++--------
 drivers/net/mlx5/mlx5_flow.c    | 25 +++++++-----------
 drivers/net/mlx5/mlx5_mac.c     |  3 +--
 drivers/net/mlx5/mlx5_mr.c      | 10 +++----
 drivers/net/mlx5/mlx5_rxq.c     |  4 +--
 drivers/net/mlx5/mlx5_rxtx.c    | 51 +++++++++++++++---------------------
 drivers/net/mlx5/mlx5_stats.c   |  2 +-
 drivers/net/mlx5/mlx5_trigger.c |  4 +--
 drivers/net/mlx5/mlx5_txq.c     | 19 ++++++--------
 10 files changed, 55 insertions(+), 85 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 2bb6998467..d6c1c734a1 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -571,7 +571,8 @@ priv_uar_init_secondary(struct priv *priv)
  *   0 on success, negative errno value on failure.
  */
 static int
-mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	       struct rte_pci_device *pci_dev)
 {
 	struct ibv_device **list;
 	struct ibv_device *ibv_dev;
@@ -588,7 +589,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	struct ibv_counter_set_description cs_desc;
 #endif
 
-	(void)pci_drv;
 	assert(pci_drv == &mlx5_driver);
 	/* Get mlx5_dev[] index. */
 	idx = mlx5_dev_idx(&pci_dev->addr);
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 93c7672d3b..d84f90e4f6 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -468,11 +468,9 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
  *
  * @param dev
  *   Pointer to Ethernet device structure.
- * @param wait_to_complete
- *   Wait for request completion (ignored).
  */
 static int
-mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
 	struct ethtool_cmd edata = {
@@ -484,7 +482,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
 
 	/* priv_lock() is not taken to allow concurrent calls. */
-	(void)wait_to_complete;
 	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
 		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
 		return -1;
 	}
@@ -534,11 +531,9 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
  *
  * @param dev
  *   Pointer to Ethernet device structure.
- * @param wait_to_complete
- *   Wait for request completion (ignored).
  */
 static int
-mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
 	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
@@ -546,7 +541,6 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
 	struct rte_eth_link dev_link;
 	uint64_t sc;
 
-	(void)wait_to_complete;
 	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
 		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
 		return -1;
@@ -676,7 +670,7 @@ priv_link_stop(struct priv *priv)
  *   Wait for request completion (ignored).
  */
 int
-priv_link_update(struct priv *priv, int wait_to_complete)
+priv_link_update(struct priv *priv, int wait_to_complete __rte_unused)
 {
 	struct rte_eth_dev *dev = priv->dev;
 	struct utsname utsname;
@@ -688,9 +682,9 @@ priv_link_update(struct priv *priv, int wait_to_complete)
 	    sscanf(utsname.release, "%d.%d.%d",
 		   &ver[0], &ver[1], &ver[2]) != 3 ||
 	    KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
-		ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
+		ret = mlx5_link_update_unlocked_gset(dev);
 	else
-		ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
+		ret = mlx5_link_update_unlocked_gs(dev);
 	/* If lsc interrupt is disabled, should always be ready for traffic. */
 	if (!dev->data->dev_conf.intr_conf.lsc) {
 		priv_link_start(priv);
@@ -742,7 +736,7 @@ priv_force_link_status_change(struct priv *priv, int status)
  *   Wait for request completion (ignored).
  */
 int
-mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 {
 	struct priv *priv = dev->data->dev_private;
 	int ret;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 42dde1cbf9..76900b095a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -527,7 +527,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
  *   0 on success, errno value on failure.
  */
 static int
-priv_flow_convert_rss_conf(struct priv *priv,
+priv_flow_convert_rss_conf(struct priv *priv __rte_unused,
 			   struct mlx5_flow_parse *parser,
 			   const struct rte_eth_rss_conf *rss_conf)
 {
@@ -536,7 +536,6 @@ priv_flow_convert_rss_conf(struct priv *priv,
 	 * priv_flow_convert_actions() to initialize the parser with the
 	 * device default RSS configuration.
 	 */
-	(void)priv;
 	if (rss_conf) {
 		if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
 			return EINVAL;
@@ -569,13 +568,11 @@ priv_flow_convert_rss_conf(struct priv *priv,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-priv_flow_convert_attributes(struct priv *priv,
+priv_flow_convert_attributes(struct priv *priv __rte_unused,
 			     const struct rte_flow_attr *attr,
 			     struct rte_flow_error *error,
-			     struct mlx5_flow_parse *parser)
+			     struct mlx5_flow_parse *parser __rte_unused)
 {
-	(void)priv;
-	(void)parser;
 	if (attr->group) {
 		rte_flow_error_set(error, ENOTSUP,
 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -788,7 +785,7 @@ priv_flow_convert_actions(struct priv *priv,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-priv_flow_convert_items_validate(struct priv *priv,
+priv_flow_convert_items_validate(struct priv *priv __rte_unused,
 				 const struct rte_flow_item items[],
 				 struct rte_flow_error *error,
 				 struct mlx5_flow_parse *parser)
@@ -796,7 +793,6 @@ priv_flow_convert_items_validate(struct priv *priv,
 	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
 	unsigned int i;
 
-	(void)priv;
 	/* Initialise the offsets to start after verbs attribute. */
 	for (i = 0; i != hash_rxq_init_n; ++i)
 		parser->queue[i].offset = sizeof(struct ibv_flow_attr);
@@ -880,14 +876,13 @@ priv_flow_convert_items_validate(struct priv *priv,
  *   A verbs flow attribute on success, NULL otherwise.
  */
 static struct ibv_flow_attr*
-priv_flow_convert_allocate(struct priv *priv,
+priv_flow_convert_allocate(struct priv *priv __rte_unused,
 			   unsigned int priority,
 			   unsigned int size,
 			   struct rte_flow_error *error)
 {
 	struct ibv_flow_attr *ibv_attr;
 
-	(void)priv;
 	ibv_attr = rte_calloc(__func__, 1, size, 0);
 	if (!ibv_attr) {
 		rte_flow_error_set(error, ENOMEM,
@@ -909,7 +904,8 @@ priv_flow_convert_allocate(struct priv *priv,
  *   Internal parser structure.
  */
 static void
-priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
+priv_flow_convert_finalise(struct priv *priv __rte_unused,
+			   struct mlx5_flow_parse *parser)
 {
 	const unsigned int ipv4 =
 		hash_rxq_init[parser->layer].ip_version == MLX5_IPV4;
@@ -920,7 +916,6 @@ priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
 	const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
 	unsigned int i;
 
-	(void)priv;
 	/* Remove any other flow not matching the pattern. */
 	if (parser->queues_n == 1) {
 		for (i = 0; i != hash_rxq_init_n; ++i) {
@@ -2441,11 +2436,10 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
 int
 mlx5_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow *flow,
-		  struct rte_flow_error *error)
+		  struct rte_flow_error *error __rte_unused)
 {
 	struct priv *priv = dev->data->dev_private;
 
-	(void)error;
 	priv_lock(priv);
 	priv_flow_destroy(priv, &priv->flows, flow);
 	priv_unlock(priv);
@@ -2460,11 +2454,10 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
  */
 int
 mlx5_flow_flush(struct rte_eth_dev *dev,
-		struct rte_flow_error *error)
+		struct rte_flow_error *error __rte_unused)
 {
 	struct priv *priv = dev->data->dev_private;
 
-	(void)error;
 	priv_lock(priv);
 	priv_flow_flush(priv, &priv->flows);
 	priv_unlock(priv);
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index e8a8d45940..a529dfeac7 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -88,12 +88,11 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
  */
 int
 mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
-		  uint32_t index, uint32_t vmdq)
+		  uint32_t index, uint32_t vmdq __rte_unused)
 {
 	unsigned int i;
 	int ret = 0;
 
-	(void)vmdq;
 	assert(index < MLX5_MAX_MAC_ADDRESSES);
 	/* First, make sure this address isn't already configured. */
 	for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) {
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 857dfcd837..38a8e2f409 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -26,15 +26,12 @@ struct mlx5_check_mempool_data {
 
 /* Called by mlx5_check_mempool() when iterating the memory chunks. */
 static void
-mlx5_check_mempool_cb(struct rte_mempool *mp,
+mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused,
 		      void *opaque, struct rte_mempool_memhdr *memhdr,
-		      unsigned int mem_idx)
+		      unsigned int mem_idx __rte_unused)
 {
 	struct mlx5_check_mempool_data *data = opaque;
 
-	(void)mp;
-	(void)mem_idx;
-
 	/* It already failed, skip the next chunks. */
 	if (data->ret != 0)
 		return;
@@ -336,9 +333,8 @@ priv_mr_get(struct priv *priv, struct rte_mempool *mp)
  *   0 on success, errno on failure.
  */
 int
-priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
+priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr)
 {
-	(void)priv;
 	assert(mr);
 	DEBUG("Memory Region %p refcnt: %d",
 	      (void *)mr, rte_atomic32_read(&mr->refcnt));
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 238fa7e563..8b9cc1dd04 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -910,9 +910,9 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv)
  *   Verbs Rx queue object.
  */
 int
-mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
+mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused,
+			     struct mlx5_rxq_ibv *rxq_ibv)
 {
-	(void)priv;
 	assert(rxq_ibv);
 	return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 049f7e6c1f..93d794ede7 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1899,11 +1899,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Number of packets successfully transmitted (<= pkts_n).
  */
 uint16_t
-removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_tx_burst(void *dpdk_txq __rte_unused,
+		 struct rte_mbuf **pkts __rte_unused,
+		 uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_txq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
@@ -1924,11 +1923,10 @@ removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Number of packets successfully received (<= pkts_n).
  */
 uint16_t
-removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_rx_burst(void *dpdk_txq __rte_unused,
+		 struct rte_mbuf **pkts __rte_unused,
+		 uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_rxq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
@@ -1940,58 +1938,51 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
  */
 uint16_t __attribute__((weak))
-mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
+		      struct rte_mbuf **pkts __rte_unused,
+		      uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_txq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
 uint16_t __attribute__((weak))
-mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
+		  struct rte_mbuf **pkts __rte_unused,
+		  uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_txq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
 uint16_t __attribute__((weak))
-mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
+		  struct rte_mbuf **pkts __rte_unused,
+		  uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_rxq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
 int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+priv_check_raw_vec_tx_support(struct priv *priv __rte_unused,
+			      struct rte_eth_dev *dev __rte_unused)
 {
-	(void)priv;
-	(void)dev;
 	return -ENOTSUP;
 }
 
 int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+priv_check_vec_tx_support(struct priv *priv __rte_unused,
+			  struct rte_eth_dev *dev __rte_unused)
 {
-	(void)priv;
-	(void)dev;
 	return -ENOTSUP;
 }
 
 int __attribute__((weak))
-rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
 {
-	(void)rxq;
 	return -ENOTSUP;
 }
 
 int __attribute__((weak))
-priv_check_vec_rx_support(struct priv *priv)
+priv_check_vec_rx_support(struct priv *priv __rte_unused)
 {
-	(void)priv;
 	return -ENOTSUP;
 }
 
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index eb9c65dcc9..167e405480 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -488,7 +488,7 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
  *   Number of xstats names.
  */
 int
-mlx5_xstats_get_names(struct rte_eth_dev *dev,
+mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
 		      struct rte_eth_xstat_name *xstats_names, unsigned int n)
 {
 	struct priv *priv = dev->data->dev_private;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f5711a998b..72e8ff6440 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -340,9 +340,9 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
  *   0 on success.
  */
 int
-priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev)
+priv_dev_traffic_disable(struct priv *priv,
+			 struct rte_eth_dev *dev __rte_unused)
 {
-	(void)dev;
 	priv_flow_flush(priv, &priv->ctrl_flows);
 	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 42996e8db3..b10700beb6 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -578,9 +578,9 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
  *   0 on success, errno on failure.
  */
 int
-mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused,
+			  struct mlx5_txq_ibv *txq_ibv)
 {
-	(void)priv;
 	assert(txq_ibv);
 	DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
 	      (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
@@ -603,9 +603,9 @@ mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
  *   Verbs Tx queue object.
  */
 int
-mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused,
+			     struct mlx5_txq_ibv *txq_ibv)
 {
-	(void)priv;
 	assert(txq_ibv);
 	return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
 }
@@ -808,13 +808,10 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
 
 		mlx5_priv_txq_ibv_get(priv, idx);
 		for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
-			struct mlx5_mr *mr = NULL;
-
-			(void)mr;
-			if (ctrl->txq.mp2mr[i]) {
-				mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp);
-				assert(mr);
-			}
+			if (ctrl->txq.mp2mr[i])
+				claim_nonzero
+					(priv_mr_get(priv,
+						     ctrl->txq.mp2mr[i]->mp));
 		}
 		rte_atomic32_inc(&ctrl->refcnt);
 		DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
-- 
2.12.0
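
For readers skimming the archive, the short sketch below illustrates the pattern this commit applies; it is not part of the patch, and the function and parameter names are made up. Instead of silencing -Wunused-parameter with a (void) cast inside the function body, the parameter itself is annotated with __rte_unused, which <rte_common.h> defines as __attribute__((__unused__)) for GCC and Clang.

/* Illustrative sketch only -- hypothetical names, not taken from the patch. */
#include <stdint.h>

#include <rte_common.h>
#include <rte_mbuf.h>

/* Before: warnings are silenced by (void) casts in the body. */
uint16_t
demo_burst_old(void *queue, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)queue;
	(void)pkts;
	(void)pkts_n;
	return 0;
}

/* After: unused parameters are marked in the signature itself. */
uint16_t
demo_burst_new(void *queue __rte_unused,
	       struct rte_mbuf **pkts __rte_unused,
	       uint16_t pkts_n __rte_unused)
{
	return 0;
}

Both forms compile cleanly with -Wunused-parameter; the annotated form keeps the information in the prototype. Per the commit message, (void) casts for variables under #if statements are left as they are.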