From: Stephen Hemminger
To: Helin Zhang, Konstantin Ananyev
Cc: dev@dpdk.org, Stephen Hemminger
Date: Mon, 4 Apr 2016 09:14:03 -0700
Message-Id: <1459786443-27501-1-git-send-email-stephen@networkplumber.org>
X-Mailer: git-send-email 2.1.4
Subject: [dpdk-dev] [PATCH] ixgbe: cleanup whitespace and formatting issues

This driver was one of the originals and has lots of little whitespace
issues.

PS: I know Intel doesn't like whitespace changes, and there is never a
good time to do this, but there are no resulting binary changes, and it
is unlikely that more changes to this driver will occur this late in
the release cycle.
Signed-off-by: Stephen Hemminger
---
 drivers/net/ixgbe/ixgbe_82599_bypass.c   |   4 +-
 drivers/net/ixgbe/ixgbe_bypass.c         |   6 +-
 drivers/net/ixgbe/ixgbe_bypass.h         |   8 +-
 drivers/net/ixgbe/ixgbe_bypass_defines.h |   2 +-
 drivers/net/ixgbe/ixgbe_ethdev.c         | 156 +++++++++++++++----------------
 drivers/net/ixgbe/ixgbe_fdir.c           |  10 +-
 drivers/net/ixgbe/ixgbe_pf.c             |  29 +++---
 drivers/net/ixgbe/ixgbe_rxtx.c           |  80 ++++++++--------
 8 files changed, 145 insertions(+), 150 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_82599_bypass.c b/drivers/net/ixgbe/ixgbe_82599_bypass.c
index 21c42ea..db05d8e 100644
--- a/drivers/net/ixgbe/ixgbe_82599_bypass.c
+++ b/drivers/net/ixgbe/ixgbe_82599_bypass.c
@@ -306,8 +306,8 @@ ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
 		hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;

 		hw->mac.ops.disable_tx_laser = NULL;
-		hw->mac.ops.enable_tx_laser  = NULL;
-		hw->mac.ops.flap_tx_laser    = NULL;
+		hw->mac.ops.enable_tx_laser = NULL;
+		hw->mac.ops.flap_tx_laser = NULL;
 	}

 	return rc;
diff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c
index 73f608b..7006928 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.c
+++ b/drivers/net/ixgbe/ixgbe_bypass.c
@@ -82,7 +82,7 @@ ixgbe_bypass_set_time(struct ixgbe_adapter *adapter)
 		BYPASS_CTL1_VALID_M |
 		BYPASS_CTL1_OFFTRST_M;
 	value = (sec & BYPASS_CTL1_TIME_M) |
-		BYPASS_CTL1_VALID  |
+		BYPASS_CTL1_VALID |
 		BYPASS_CTL1_OFFTRST;

 	FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);
@@ -275,8 +275,8 @@ s32
 ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
 {
 	struct ixgbe_hw *hw;
-	u32 status;
-	u32 mask;
+	u32 status;
+	u32 mask;
 	s32 ret_val;
 	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
diff --git a/drivers/net/ixgbe/ixgbe_bypass.h b/drivers/net/ixgbe/ixgbe_bypass.h
index fcd9774..5f5c63e 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.h
+++ b/drivers/net/ixgbe/ixgbe_bypass.h
@@ -37,10 +37,10 @@
 #ifdef RTE_NIC_BYPASS

 struct ixgbe_bypass_mac_ops {
-	s32 (*bypass_rw) (struct ixgbe_hw *hw, u32 cmd, u32 *status);
-	bool (*bypass_valid_rd) (u32 in_reg, u32 out_reg);
-	s32 (*bypass_set) (struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
-	s32 (*bypass_rd_eep) (struct ixgbe_hw *hw, u32 addr, u8 *value);
+	s32 (*bypass_rw)(struct ixgbe_hw *hw, u32 cmd, u32 *status);
+	bool (*bypass_valid_rd)(u32 in_reg, u32 out_reg);
+	s32 (*bypass_set)(struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
+	s32 (*bypass_rd_eep)(struct ixgbe_hw *hw, u32 addr, u8 *value);
 };

 struct ixgbe_bypass_info {
diff --git a/drivers/net/ixgbe/ixgbe_bypass_defines.h b/drivers/net/ixgbe/ixgbe_bypass_defines.h
index 22570ac..cafcb27 100644
--- a/drivers/net/ixgbe/ixgbe_bypass_defines.h
+++ b/drivers/net/ixgbe/ixgbe_bypass_defines.h
@@ -136,7 +136,7 @@ enum ixgbe_state_t {
 #define BYPASS_LOG_EVENT_SHIFT	28
 #define BYPASS_LOG_CLEAR_SHIFT	24 /* bit offset */
 #define IXGBE_DEV_TO_ADPATER(dev) \
-	((struct ixgbe_adapter*)(dev->data->dev_private))
+	((struct ixgbe_adapter *)(dev->data->dev_private))

 /* extractions from ixgbe_phy.h */
 #define	IXGBE_I2C_EEPROM_DEV_ADDR2	0xA2
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 3f1ebc1..1be0009 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -232,7 +232,7 @@ static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
 					struct ether_addr *mac_addr);
-static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);

 /* For Virtual Function support */
 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
@@ -264,14 +264,14 @@ static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
 /* For Eth VMDQ APIs support */
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
-		ether_addr* mac_addr,uint8_t on);
-static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
+		ether_addr * mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
 static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,
 		uint16_t pool, uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
 static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
-		uint64_t pool_mask,uint8_t vlan_on);
+		uint64_t pool_mask, uint8_t vlan_on);
 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 		struct rte_eth_mirror_conf *mirror_conf,
 		uint8_t rule_id, uint8_t on);
@@ -397,21 +397,21 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		last = latest;                                          \
 	}

-#define IXGBE_SET_HWSTRIP(h, q) do{\
-		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_SET_HWSTRIP(h, q) do {\
+		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
 		(h)->bitmap[idx] |= 1 << bit;\
 	} while (0)

-#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
-		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
+		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
 		(h)->bitmap[idx] &= ~(1 << bit);\
 	} while (0)

-#define IXGBE_GET_HWSTRIP(h, q, r) do{\
-		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_GET_HWSTRIP(h, q, r) do {\
+		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
 		(r) = (h)->bitmap[idx] >> bit & 1;\
 	} while (0)
@@ -901,8 +901,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
 			     stat_mappings->rqsmr[n], n);
 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
-	}
-	else {
+	} else {
 		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
 			     stat_mappings->tqsm[n], n);
 		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
@@ -911,7 +910,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 }

 static void
-ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
+ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
 {
 	struct ixgbe_stat_mapping_registers *stat_mappings =
 		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
@@ -929,7 +928,7 @@ ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
 }

 static void
-ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
 {
 	uint8_t i;
 	struct ixgbe_dcb_tc_config *tc;
@@ -952,7 +951,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
 	tc = &dcb_config->tc_config[0];
 	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
 	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-	for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
+	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
 		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
 		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
 	}
@@ -1016,7 +1015,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	struct rte_pci_device *pci_dev;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
 		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
 	struct ixgbe_hwstrip *hwstrip =
 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -1039,7 +1038,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	 * has already done this work. Only check we don't need a different
 	 * RX and TX function.
 	 */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		struct ixgbe_tx_queue *txq;
 		/* TX queue function in primary, set by last queue initialized
 		 * Tx queue may not initialized by primary process */
@@ -1086,7 +1085,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)

 	/* Initialize DCB configuration*/
 	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
-	ixgbe_dcb_init(hw,dcb_config);
+	ixgbe_dcb_init(hw, dcb_config);
 	/* Get Hardware Flow Control setting */
 	hw->fc.requested_mode = ixgbe_fc_full;
 	hw->fc.current_mode = ixgbe_fc_full;
@@ -1313,7 +1312,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	struct rte_pci_device *pci_dev;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
 		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
 	struct ixgbe_hwstrip *hwstrip =
 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -1328,7 +1327,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	/* for secondary processes, we don't initialise any further as primary
 	 * has already done this work. Only check we don't need a different
 	 * RX function */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		struct ixgbe_tx_queue *txq;
 		/* TX queue function in primary, set by last queue initialized
 		 * Tx queue may not initialized by primary process
@@ -1537,7 +1536,7 @@ ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
 	uint32_t vfta;
 	uint32_t vid_idx;
@@ -1611,7 +1610,7 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
 	uint32_t vlnctrl;
 	uint16_t i;
@@ -1658,8 +1657,7 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
 		/* No queue level support */
 		PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
 		return;
-	}
-	else {
+	} else {
 		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
 		ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
 		ctrl &= ~IXGBE_RXDCTL_VME;
@@ -1682,8 +1680,7 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
 		/* No queue level supported */
 		PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
 		return;
-	}
-	else {
+	} else {
 		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
 		ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
 		ctrl |= IXGBE_RXDCTL_VME;
@@ -1707,8 +1704,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
 		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 		ctrl &= ~IXGBE_VLNCTRL_VME;
 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	}
-	else {
+	} else {
 		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
@@ -1735,8 +1731,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
 		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 		ctrl |= IXGBE_VLNCTRL_VME;
 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	}
-	else {
+	} else {
 		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
@@ -2664,15 +2659,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	/* Rx Errors */
 	stats->imissed = total_missed_rx;
 	stats->ierrors = hw_stats->crcerrs +
-	                 hw_stats->mspdc +
-	                 hw_stats->rlec +
-	                 hw_stats->ruc +
-	                 hw_stats->roc +
-	                 hw_stats->illerrc +
-	                 hw_stats->errbc +
-	                 hw_stats->rfc +
-	                 hw_stats->fccrc +
-	                 hw_stats->fclast;
+			 hw_stats->mspdc +
+			 hw_stats->rlec +
+			 hw_stats->ruc +
+			 hw_stats->roc +
+			 hw_stats->illerrc +
+			 hw_stats->errbc +
+			 hw_stats->rfc +
+			 hw_stats->fccrc +
+			 hw_stats->fclast;

 	/* Tx Errors */
 	stats->oerrors = 0;
@@ -2786,7 +2781,7 @@ static void
 ixgbevf_update_stats(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
 			  IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

 	/* Good Rx packet, include VF loopback */
@@ -2859,7 +2854,7 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 static void
 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
 {
-	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

 	/* Sync HW register to the last stats */
@@ -3356,7 +3351,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)

 	if (intr_enable_delay) {
 		if (rte_eal_alarm_set(timeout * 1000,
-				      ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+				      ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
 			PMD_DRV_LOG(ERR, "Error setting alarm");
 	} else {
 		PMD_DRV_LOG(DEBUG, "enable intr immediately");
@@ -3575,7 +3570,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
  *  Enable flow control according to the current settings.
  */
 static int
-ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
 {
 	int ret_val = 0;
 	uint32_t mflcn_reg, fccfg_reg;
@@ -3622,13 +3617,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
 		 * and the TX pause can not be disabled
 		 */
 		nb_rx_en = 0;
-		for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
 			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
 			if (reg & IXGBE_FCRTH_FCEN)
 				nb_rx_en++;
 		}
 		if (nb_rx_en > 1)
-			fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
 		break;
 	case ixgbe_fc_rx_pause:
 		/*
@@ -3645,20 +3640,20 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
 		 * and the TX pause can not be disabled
 		 */
 		nb_rx_en = 0;
-		for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
 			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
 			if (reg & IXGBE_FCRTH_FCEN)
 				nb_rx_en++;
 		}
 		if (nb_rx_en > 1)
-			fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
 		break;
 	case ixgbe_fc_tx_pause:
 		/*
 		 * Tx Flow control is enabled, and Rx Flow control is
 		 * disabled by software override.
 		 */
-		fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
 		break;
 	case ixgbe_fc_full:
 		/* Flow control (both Rx and Tx) is enabled by SW override. */
@@ -3708,13 +3703,13 @@ out:
 }

 static int
-ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int32_t ret_val = IXGBE_NOT_IMPLEMENTED;

 	if (hw->mac.type != ixgbe_mac_82598EB) {
-		ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
+		ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
 	}
 	return ret_val;
 }
@@ -3728,9 +3723,9 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
 	uint8_t tc_num;
 	uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
 	struct ixgbe_hw *hw =
-                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_dcb_config *dcb_config =
-                IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+		IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);

 	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
 		ixgbe_fc_none,
@@ -3763,7 +3758,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
 	hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
 	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;

-	err = ixgbe_dcb_pfc_enable(dev,tc_num);
+	err = ixgbe_dcb_pfc_enable(dev, tc_num);

 	/* Not negotiated is not an error case */
 	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
@@ -3971,7 +3966,7 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw)
 static int
 ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
-	struct rte_eth_conf* conf = &dev->data->dev_conf;
+	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
@@ -4033,7 +4028,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	}

 	/* Set vfta */
-	ixgbevf_set_vfta_all(dev,1);
+	ixgbevf_set_vfta_all(dev, 1);

 	/* Set HW strip */
 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
@@ -4084,7 +4079,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 	 * Clear what we set, but we still keep shadow_vfta to
 	 * restore after device starts
 	 */
-	ixgbevf_set_vfta_all(dev,0);
+	ixgbevf_set_vfta_all(dev, 0);

 	/* Clear stored conf */
 	dev->data->scattered_rx = 0;
@@ -4123,18 +4118,18 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
 	int i = 0, j = 0, vfta = 0, mask = 1;

-	for (i = 0; i < IXGBE_VFTA_SIZE; i++){
+	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
 		vfta = shadow_vfta->vfta[i];
 		if (vfta) {
 			mask = 1;
-			for (j = 0; j < 32; j++){
+			for (j = 0; j < 32; j++) {
 				if (vfta & mask)
 					ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
-				mask<<=1;
+				mask <<= 1;
 			}
 		}
 	}
@@ -4146,7 +4141,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
 	uint32_t vid_idx = 0;
 	uint32_t vid_bit = 0;
@@ -4191,7 +4186,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 		ctrl &= ~IXGBE_RXDCTL_VME;
 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);

-	ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
+	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
 }

 static void
@@ -4207,7 +4202,7 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);

 		for (i = 0; i < hw->mac.max_rx_queues; i++)
-			ixgbevf_vlan_strip_queue_set(dev,i,on);
+			ixgbevf_vlan_strip_queue_set(dev, i, on);
 	}
 }
@@ -4227,9 +4222,10 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
 }

 static uint32_t
-ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
 {
 	uint32_t vector = 0;
+
 	switch (hw->mac.mc_filter_type) {
 	case 0:   /* use bits [47:36] of the address */
 		vector = ((uc_addr->addr_bytes[4] >> 4) |
@@ -4257,7 +4253,7 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
 }

 static int
-ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr* mac_addr,
 			uint8_t on)
 {
 	uint32_t vector;
@@ -4279,7 +4275,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr* mac_addr,
 	if (hw->mac.type < ixgbe_mac_82599EB)
 		return -ENOTSUP;

-	vector = ixgbe_uta_vector(hw,mac_addr);
+	vector = ixgbe_uta_vector(hw, mac_addr);
 	uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
 	uta_shift = vector & ixgbe_uta_bit_mask;
@@ -4304,7 +4300,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr* mac_addr,
 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
 	else
-		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

 	return 0;
 }
@@ -4389,7 +4385,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
 static int
 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
 {
-	uint32_t reg,addr;
+	uint32_t reg, addr;
 	uint32_t val;
 	const uint8_t bit1 = 0x1;
@@ -4408,7 +4404,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
 	else
 		reg &= ~val;

-	IXGBE_WRITE_REG(hw, addr,reg);
+	IXGBE_WRITE_REG(hw, addr, reg);

 	return 0;
 }
@@ -4416,7 +4412,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
 static int
 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
 {
-	uint32_t reg,addr;
+	uint32_t reg, addr;
 	uint32_t val;
 	const uint8_t bit1 = 0x1;
@@ -4435,7 +4431,7 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
 	else
 		reg &= ~val;

-	IXGBE_WRITE_REG(hw, addr,reg);
+	IXGBE_WRITE_REG(hw, addr, reg);

 	return 0;
 }
@@ -4453,7 +4449,7 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
 		return -ENOTSUP;
 	for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
 		if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
-			ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
+			ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
 			if (ret < 0)
 				return ret;
 		}
@@ -4475,7 +4471,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 			struct rte_eth_mirror_conf *mirror_conf,
 			uint8_t rule_id, uint8_t on)
 {
-	uint32_t mr_ctl,vlvf;
+	uint32_t mr_ctl, vlvf;
 	uint32_t mp_lsb = 0;
 	uint32_t mv_msb = 0;
 	uint32_t mv_lsb = 0;
@@ -4488,7 +4484,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	const uint8_t vlan_mask_offset = 32;
 	const uint8_t dst_pool_offset = 8;
 	const uint8_t rule_mr_offset = 4;
-	const uint8_t mirror_rule_mask= 0x0F;
+	const uint8_t mirror_rule_mask = 0x0F;

 	struct ixgbe_mirror_info *mr_info =
 			(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
@@ -4511,7 +4507,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
 		mirror_type |= IXGBE_MRCTL_VLME;
 		/* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
-		for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
+		for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
 			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
 				/* search vlan id related pool vlan filter index */
 				reg_index = ixgbe_find_vlvf_slot(hw,
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 2e4c353..d040feb 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -189,14 +189,13 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
 	IXGBE_WRITE_FLUSH(hw);
 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-		    IXGBE_FDIRCTRL_INIT_DONE)
+				IXGBE_FDIRCTRL_INIT_DONE)
 			break;
 		msec_delay(1);
 	}

 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
-		PMD_INIT_LOG(ERR, "Flow Director poll time exceeded "
-			"during enabling!");
+		PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
 		return -ETIMEDOUT;
 	}
 	return 0;
@@ -282,6 +281,7 @@ static inline uint32_t
 reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
 {
 	uint32_t mask = hi_dword << 16;
+
 	mask |= lo_dword;
 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
@@ -822,7 +822,7 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,

 	/* process the remaining 30 bits in the key 2 bits at a time */
-	for (i = 15; i; i-- ) {
+	for (i = 15; i; i--) {
 		if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
 		if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
 	}
@@ -1016,7 +1016,7 @@ fdir_add_signature_filter_82599(struct ixgbe_hw *hw,

 	/* configure FDIRCMD register */
 	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
-		IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+		   IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
 	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
 	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index a2787d9..ceb07f4 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -97,9 +97,9 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	struct ixgbe_vf_info **vfinfo =
 		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
 	struct ixgbe_mirror_info *mirror_info =
-        IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
+		IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
 	struct ixgbe_uta_info *uta_info =
-        IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
+		IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	uint16_t vf_num;
@@ -115,8 +115,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	if (*vfinfo == NULL)
 		rte_panic("Cannot allocate memory for private VF data\n");

-	memset(mirror_info,0,sizeof(struct ixgbe_mirror_info));
-	memset(uta_info,0,sizeof(struct ixgbe_uta_info));
+	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
+	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
 	hw->mac.mc_filter_type = 0;

 	if (vf_num >= ETH_32_POOLS) {
@@ -280,19 +280,19 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	}

 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
-        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

-        /*
+	/*
 	 * enable vlan filtering and allow all vlan tags through
 	 */
-        vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-        vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
-        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

-        /* VFTA - enable all vlan filters */
-        for (i = 0; i < IXGBE_MAX_VFTA; i++) {
-                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
-        }
+	/* VFTA - enable all vlan filters */
+	for (i = 0; i < IXGBE_MAX_VFTA; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+	}

 	/* Enable MAC Anti-Spoofing */
 	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
@@ -481,7 +481,7 @@ ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

-	if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) {
+	if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
 		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
 		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
 	}
@@ -678,6 +678,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 	/* perform VF reset */
 	if (msgbuf[0] == IXGBE_VF_RESET) {
 		int ret = ixgbe_vf_reset(dev, vf, msgbuf);
+
 		vfinfo[vf].clear_to_send = true;
 		return ret;
 	}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index b018ba7..794e174 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1962,7 +1962,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
 	txq->ctx_curr = 0;
-	memset((void*)&txq->ctx_cache, 0,
+	memset((void *)&txq->ctx_cache, 0,
 		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
 }
@@ -2442,7 +2442,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Zero init all the descriptors in the ring.
 	 */
-	memset (rz->addr, 0, RX_RING_SZ);
+	memset(rz->addr, 0, RX_RING_SZ);

 	/*
 	 * Modified to setup VFRDT for Virtual Function
@@ -2456,8 +2456,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
 		rxq->rdh_reg_addr =
 			IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
-	}
-	else {
+	} else {
 		rxq->rdt_reg_addr =
 			IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
 		rxq->rdh_reg_addr =
@@ -2924,14 +2923,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	/* zero alloc all unused TCs */
 	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
-		rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
+
+		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
 		/* clear 10 bits. */
 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 	}

 	/* MRQC: enable vmdq and dcb */
 	mrqc = ((num_pools == ETH_16_POOLS) ? \
-		IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
+		IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN);
 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

 	/* PFVTCTL: turn on virtualisation and set the default pool */
@@ -3000,7 +3000,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
  */
 static void
 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
-               struct ixgbe_dcb_config *dcb_config)
+		       struct ixgbe_dcb_config *dcb_config)
 {
 	uint32_t reg;
 	uint32_t q;
@@ -3015,18 +3015,17 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
 	/* Enable DCB for Tx with 8 TCs */
 	if (dcb_config->num_tcs.pg_tcs == 8) {
 		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
-	}
-	else {
+	} else {
 		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
 	}
 	if (dcb_config->vt_mode)
-	    reg |= IXGBE_MTQC_VT_ENA;
+		reg |= IXGBE_MTQC_VT_ENA;
 	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

 	/* Disable drop for all queues */
 	for (q = 0; q < 128; q++)
 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
-	             (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

 	/* Enable the Tx desc arbiter */
 	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
@@ -3062,25 +3061,24 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 			vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ?
 			0xFFFF : 0xFFFFFFFF);

 	/*Configure general DCB TX parameters*/
-	ixgbe_dcb_tx_hw_config(hw,dcb_config);
+	ixgbe_dcb_tx_hw_config(hw, dcb_config);
 	return;
 }

 static void
 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
-                        struct ixgbe_dcb_config *dcb_config)
+			 struct ixgbe_dcb_config *dcb_config)
 {
 	struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 			&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	struct ixgbe_dcb_tc_config *tc;
-	uint8_t i,j;
+	uint8_t i, j;

 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
+	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
 		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
 		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
-	}
-	else {
+	} else {
 		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
 		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
 	}
@@ -3095,19 +3093,18 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,

 static void
 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
-                        struct ixgbe_dcb_config *dcb_config)
+		       struct ixgbe_dcb_config *dcb_config)
 {
 	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
 			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
 	struct ixgbe_dcb_tc_config *tc;
-	uint8_t i,j;
+	uint8_t i, j;

 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
+	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
 		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
 		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
-	}
-	else {
+	} else {
 		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
 		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
 	}
@@ -3129,7 +3126,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	struct rte_eth_dcb_rx_conf *rx_conf =
 			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 	struct ixgbe_dcb_tc_config *tc;
-	uint8_t i,j;
+	uint8_t i, j;

 	dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
 	dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
@@ -3150,7 +3147,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	struct rte_eth_dcb_tx_conf *tx_conf =
 			&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
 	struct ixgbe_dcb_tc_config *tc;
-	uint8_t i,j;
+	uint8_t i, j;

 	dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
 	dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
@@ -3171,7 +3168,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
  */
 static void
 ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
-               struct ixgbe_dcb_config *dcb_config)
+		       struct ixgbe_dcb_config *dcb_config)
 {
 	uint32_t reg;
 	uint32_t vlanctrl;
@@ -3237,7 +3234,7 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,

 static void
 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
-			uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+			uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
 {
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
@@ -3262,16 +3259,16 @@ ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *m
 {
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
-		ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
-		ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
+		ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
+		ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
 		break;
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
-		ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
+		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
+		ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
 		break;
 	default:
 		break;
@@ -3292,7 +3289,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			struct ixgbe_dcb_config *dcb_config)
 {
 	int ret = 0;
-	uint8_t i,pfc_en,nb_tcs;
+	uint8_t i, pfc_en, nb_tcs;
 	uint16_t pbsize, rx_buffer_size;
 	uint8_t config_dcb_rx = 0;
 	uint8_t config_dcb_tx = 0;
@@ -3306,7 +3303,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	struct ixgbe_hw *hw =
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

-	switch(dev->data->dev_conf.rxmode.mq_mode){
+	switch (dev->data->dev_conf.rxmode.mq_mode) {
 	case ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		if (hw->mac.type != ixgbe_mac_82598EB) {
@@ -3338,9 +3335,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters from rte_eth_conf */
-		ixgbe_dcb_vt_tx_config(dev,dcb_config);
+		ixgbe_dcb_vt_tx_config(dev, dcb_config);
 		/*Configure general VMDQ and DCB TX parameters*/
-		ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
+		ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;

 	case ETH_MQ_TX_DCB:
@@ -3363,8 +3360,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
+
 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
-			mask = (uint8_t)(mask & (~ (1 << map[i])));
+			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
 			if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
 				map[j++] = i;
@@ -3424,9 +3422,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	}

 	/*Calculates traffic class credits*/
-	ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+	ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
 				IXGBE_DCB_TX_CONFIG);
-	ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+	ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
 				IXGBE_DCB_RX_CONFIG);

 	if (config_dcb_rx) {
@@ -3436,7 +3434,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
 		ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
 		/* Configure PG(ETS) RX */
-		ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
+		ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
 	}

 	if (config_dcb_tx) {
@@ -3446,7 +3444,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
 		ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
 		/* Configure PG(ETS) TX */
-		ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
+		ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
 	}

 	/*Configure queue statistics registers*/
@@ -3460,7 +3458,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		 * If the TC count is 8,and the default high_water is 48,
 		 * the low_water is 16 as default.
 		 */
-		hw->fc.high_water[i] = (pbsize * 3 ) / 4;
+		hw->fc.high_water[i] = (pbsize * 3) / 4;
 		hw->fc.low_water[i] = pbsize / 4;
 		/* Enable pfc for this TC */
 		tc = &dcb_config->tc_config[i];
@@ -3617,7 +3615,7 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
 	/* Disable drop for all queues */
 	for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
-	             (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

 	/* Enable the Tx desc arbiter */
 	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
-- 
2.1.4