From: Stephen Hemminger <stephen@networkplumber.org>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 2/4] rte_ethdev: whitespace cleanup
Date: Thu, 9 Apr 2015 14:29:40 -0700 [thread overview]
Message-ID: <1428614982-14135-3-git-send-email-stephen@networkplumber.org> (raw)
In-Reply-To: <1428614982-14135-1-git-send-email-stephen@networkplumber.org>
Add missing space after keywords and fix other whitespace issues.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
lib/librte_ether/rte_ethdev.c | 191 ++++++++++++++++++++++--------------------
lib/librte_ether/rte_ethdev.h | 57 ++++++-------
2 files changed, 128 insertions(+), 120 deletions(-)
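For reference (not part of the diff), here is a small standalone C example
illustrating the kernel-style whitespace conventions this patch enforces:
a space after the while/if keyword, a space after commas in argument lists,
and no space between '!' and its operand. The macro and function names are
invented for the illustration only.

#include <stdio.h>

/* "} while (0)": a space separates the keyword from the parenthesis. */
#define CHECK_OR_RET(cond, retval) do { \
        if (!(cond)) { \
                printf("check failed\n"); \
                return (retval); \
        } \
} while (0)

/* Space after commas in parameter and argument lists; "!(x)" keeps the
 * '!' directly against the opening parenthesis. */
static int
demo(int vf, int on)
{
        CHECK_OR_RET(vf >= 0, -1);

        if (!(vf == 0 && on == 0))
                printf("vf=%d on=%d\n", vf, on);

        return 0;
}

int
main(void)
{
        return demo(1, 1);
}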
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 3120c3a..56e22ea 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -83,13 +83,13 @@
PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
return (retval); \
} \
-} while(0)
+} while (0)
#define PROC_PRIMARY_OR_RET() do { \
if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
return; \
} \
-} while(0)
+} while (0)
/* Macros to check for invlaid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
@@ -97,13 +97,13 @@
PMD_DEBUG_TRACE("Function not supported\n"); \
return (retval); \
} \
-} while(0)
+} while (0)
#define FUNC_PTR_OR_RET(func) do { \
if ((func) == NULL) { \
PMD_DEBUG_TRACE("Function not supported\n"); \
return; \
} \
-} while(0)
+} while (0)
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
@@ -185,7 +185,7 @@ rte_eth_dev_data_alloc(void)
const unsigned flags = 0;
const struct rte_memzone *mz;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY){
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
rte_socket_id(), flags);
@@ -257,7 +257,7 @@ rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
static int
rte_eth_dev_create_unique_device_name(char *name, size_t size,
- struct rte_pci_device *pci_dev)
+ struct rte_pci_device *pci_dev)
{
int ret;
@@ -265,8 +265,8 @@ rte_eth_dev_create_unique_device_name(char *name, size_t size,
return -EINVAL;
ret = snprintf(name, size, "%d:%d.%d",
- pci_dev->addr.bus, pci_dev->addr.devid,
- pci_dev->addr.function);
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function);
if (ret < 0)
return ret;
return 0;
@@ -303,7 +303,7 @@ rte_eth_dev_init(struct rte_pci_driver *pci_drv,
if (eth_dev == NULL)
return -ENOMEM;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY){
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
eth_drv->dev_private_size,
RTE_CACHE_LINE_SIZE);
@@ -699,9 +699,9 @@ rte_eth_dev_detach(uint8_t port_id, char *name)
ret = rte_eth_dev_detach_pdev(port_id, &addr);
if (ret == 0)
snprintf(name, RTE_ETH_NAME_MAX_LEN,
- "%04x:%02x:%02x.%d",
- addr.domain, addr.bus,
- addr.devid, addr.function);
+ "%04x:%02x:%02x.%d",
+ addr.domain, addr.bus,
+ addr.devid, addr.function);
return ret;
} else
@@ -710,7 +710,7 @@ rte_eth_dev_detach(uint8_t port_id, char *name)
#else /* RTE_LIBRTE_EAL_HOTPLUG */
int
rte_eth_dev_attach(const char *devargs __rte_unused,
- uint8_t *port_id __rte_unused)
+ uint8_t *port_id __rte_unused)
{
RTE_LOG(ERR, EAL, "Hotplug support isn't enabled\n");
return -1;
@@ -719,7 +719,7 @@ rte_eth_dev_attach(const char *devargs __rte_unused,
/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id __rte_unused,
- char *name __rte_unused)
+ char *name __rte_unused)
{
RTE_LOG(ERR, EAL, "Hotplug support isn't enabled\n");
return -1;
@@ -754,6 +754,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
return -(ENOMEM);
if (nb_queues > old_nb_queues) {
uint16_t new_qs = nb_queues - old_nb_queues;
+
memset(rxq + old_nb_queues, 0,
sizeof(rxq[0]) * new_qs);
}
@@ -897,6 +898,7 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
return -ENOMEM;
if (nb_queues > old_nb_queues) {
uint16_t new_qs = nb_queues - old_nb_queues;
+
memset(txq + old_nb_queues, 0,
sizeof(txq[0]) * new_qs);
}
@@ -912,6 +914,7 @@ static int
rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
switch (nb_rx_q) {
case 1:
case 2:
@@ -1025,8 +1028,8 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
return (-EINVAL);
}
conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
- if (! (conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
"nb_queue_pools must be %d or %d\n",
port_id, ETH_16_POOLS, ETH_32_POOLS);
@@ -1043,8 +1046,8 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
return (-EINVAL);
}
conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
- if (! (conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
"nb_queue_pools != %d or nb_queue_pools "
"!= %d\n",
@@ -1064,8 +1067,8 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
return (-EINVAL);
}
conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
- if (! (conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
"nb_tcs != %d or nb_tcs "
"!= %d\n",
@@ -1084,8 +1087,8 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
return (-EINVAL);
}
conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
- if (! (conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
"nb_tcs != %d or nb_tcs "
"!= %d\n",
@@ -1195,8 +1198,7 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
(unsigned)dev_conf->rxmode.max_rx_pkt_len,
(unsigned)dev_info.max_rx_pktlen);
return (-EINVAL);
- }
- else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
+ } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
" < min valid value %u\n",
port_id,
@@ -1739,7 +1741,7 @@ rte_eth_stats_reset(uint8_t port_id)
/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
- unsigned n)
+ unsigned n)
{
struct rte_eth_stats eth_stats;
struct rte_eth_dev *dev;
@@ -1776,7 +1778,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
stats_ptr = (char *)&eth_stats + rte_stats_strings[i].offset;
val = *(uint64_t *)stats_ptr;
snprintf(xstats[count].name, sizeof(xstats[count].name),
- "%s", rte_stats_strings[i].name);
+ "%s", rte_stats_strings[i].name);
xstats[count++].value = val;
}
@@ -1788,8 +1790,8 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
stats_ptr += q * sizeof(uint64_t);
val = *(uint64_t *)stats_ptr;
snprintf(xstats[count].name, sizeof(xstats[count].name),
- "rx_queue_%u_%s", q,
- rte_rxq_stats_strings[i].name);
+ "rx_queue_%u_%s", q,
+ rte_rxq_stats_strings[i].name);
xstats[count++].value = val;
}
}
@@ -1802,8 +1804,8 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
stats_ptr += q * sizeof(uint64_t);
val = *(uint64_t *)stats_ptr;
snprintf(xstats[count].name, sizeof(xstats[count].name),
- "tx_queue_%u_%s", q,
- rte_txq_stats_strings[i].name);
+ "tx_queue_%u_%s", q,
+ rte_txq_stats_strings[i].name);
xstats[count++].value = val;
}
}
@@ -1954,7 +1956,7 @@ rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
}
dev = &rte_eth_devices[port_id];
- if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
+ if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
return (-ENOSYS);
}
@@ -2026,27 +2028,27 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
/*check which option changed by application*/
cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
- if (cur != org){
+ if (cur != org) {
dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
mask |= ETH_VLAN_STRIP_MASK;
}
cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
- if (cur != org){
+ if (cur != org) {
dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
mask |= ETH_VLAN_FILTER_MASK;
}
cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
- if (cur != org){
+ if (cur != org) {
dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
mask |= ETH_VLAN_EXTEND_MASK;
}
/*no change*/
- if(mask == 0)
+ if (mask == 0)
return ret;
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
@@ -2069,13 +2071,13 @@ rte_eth_dev_get_vlan_offload(uint8_t port_id)
dev = &rte_eth_devices[port_id];
if (dev->data->dev_conf.rxmode.hw_vlan_strip)
- ret |= ETH_VLAN_STRIP_OFFLOAD ;
+ ret |= ETH_VLAN_STRIP_OFFLOAD;
if (dev->data->dev_conf.rxmode.hw_vlan_filter)
- ret |= ETH_VLAN_FILTER_OFFLOAD ;
+ ret |= ETH_VLAN_FILTER_OFFLOAD;
if (dev->data->dev_conf.rxmode.hw_vlan_extend)
- ret |= ETH_VLAN_EXTEND_OFFLOAD ;
+ ret |= ETH_VLAN_EXTEND_OFFLOAD;
return ret;
}
@@ -2120,8 +2122,8 @@ rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
|| fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type, source & destinations ports " \
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and "
+ "None l4type, source & destinations ports "
"should be null!\n");
return (-EINVAL);
}
@@ -2154,8 +2156,8 @@ rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
|| fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type, source & destinations ports " \
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and "
+ "None l4type, source & destinations ports "
"should be null!\n");
return (-EINVAL);
}
@@ -2188,8 +2190,8 @@ rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
|| fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type source & destinations ports " \
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and "
+ "None l4type source & destinations ports "
"should be null!\n");
return (-EINVAL);
}
@@ -2209,7 +2211,7 @@ rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
}
dev = &rte_eth_devices[port_id];
- if (! (dev->data->dev_conf.fdir_conf.mode)) {
+ if (!(dev->data->dev_conf.fdir_conf.mode)) {
PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
return (-ENOSYS);
}
@@ -2244,8 +2246,8 @@ rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
|| fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type, source & destinations ports " \
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and "
+ "None l4type, source & destinations ports "
"should be null!\n");
return (-EINVAL);
}
@@ -2284,8 +2286,8 @@ rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
|| fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type, source & destinations ports " \
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and "
+ "None l4type, source & destinations ports "
"should be null!\n");
return (-EINVAL);
}
@@ -2322,8 +2324,8 @@ rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
|| fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type, source & destinations ports " \
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and "
+ "None l4type, source & destinations ports "
"should be null!\n");
return (-EINVAL);
}
@@ -2348,7 +2350,7 @@ rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
}
dev = &rte_eth_devices[port_id];
- if (! (dev->data->dev_conf.fdir_conf.mode)) {
+ if (!(dev->data->dev_conf.fdir_conf.mode)) {
PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
return (-ENOSYS);
}
@@ -2688,7 +2690,7 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
return (-EINVAL);
}
if (pool >= ETH_64_POOLS) {
- PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
+ PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
return (-EINVAL);
}
@@ -2771,16 +2773,16 @@ rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
rte_eth_dev_info_get(port_id, &dev_info);
num_vfs = dev_info.max_vfs;
- if (vf > num_vfs)
- {
+ if (vf > num_vfs) {
PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
return (-EINVAL);
}
- if (rx_mode == 0)
- {
+
+ if (rx_mode == 0) {
PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
return (-EINVAL);
}
+
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
}
@@ -2837,7 +2839,7 @@ rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
if (index < 0) {
if (!on) {
PMD_DEBUG_TRACE("port %d: the MAC address was not"
- "set in UTA\n", port_id);
+ " set in UTA\n", port_id);
return (-EINVAL);
}
@@ -2882,7 +2884,7 @@ rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
}
int
-rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
+rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
{
uint16_t num_vfs;
struct rte_eth_dev *dev;
@@ -2897,18 +2899,17 @@ rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
rte_eth_dev_info_get(port_id, &dev_info);
num_vfs = dev_info.max_vfs;
- if (vf > num_vfs)
- {
+ if (vf > num_vfs) {
PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
return (-EINVAL);
}
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
- return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
+ return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
}
int
-rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
+rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
{
uint16_t num_vfs;
struct rte_eth_dev *dev;
@@ -2923,19 +2924,18 @@ rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
rte_eth_dev_info_get(port_id, &dev_info);
num_vfs = dev_info.max_vfs;
- if (vf > num_vfs)
- {
+ if (vf > num_vfs) {
PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
return (-EINVAL);
}
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
- return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
+ return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
}
int
rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
- uint64_t vf_mask,uint8_t vlan_on)
+ uint64_t vf_mask, uint8_t vlan_on)
{
struct rte_eth_dev *dev;
@@ -2946,21 +2946,20 @@ rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
}
dev = &rte_eth_devices[port_id];
- if(vlan_id > ETHER_MAX_VLAN_ID)
- {
+ if (vlan_id > ETHER_MAX_VLAN_ID) {
PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
vlan_id);
return (-EINVAL);
}
- if (vf_mask == 0)
- {
+
+ if (vf_mask == 0) {
PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
return (-EINVAL);
}
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
- vf_mask,vlan_on);
+ vf_mask, vlan_on);
}
int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
@@ -3052,20 +3051,19 @@ rte_eth_mirror_rule_set(uint8_t port_id,
}
if (mirror_conf->dst_pool >= ETH_64_POOLS) {
- PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
- "be 0-%d\n",ETH_64_POOLS - 1);
+ PMD_DEBUG_TRACE("Invalid dst pool, pool id must "
+ "be 0-%d\n", ETH_64_POOLS - 1);
return (-EINVAL);
}
if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
(mirror_conf->pool_mask == 0)) {
- PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
+ PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not "
"be 0.\n");
return (-EINVAL);
}
- if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
- {
+ if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE) {
PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
ETH_VMDQ_NUM_MIRROR_RULE - 1);
return (-EINVAL);
@@ -3087,8 +3085,7 @@ rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
return (-ENODEV);
}
- if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
- {
+ if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE) {
PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
ETH_VMDQ_NUM_MIRROR_RULE-1);
return (-EINVAL);
@@ -3171,8 +3168,8 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
dev = &rte_eth_devices[port_id];
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
- return (*dev->dev_ops->rx_descriptor_done)( \
- dev->data->rx_queues[queue_id], offset);
+ return (*dev->dev_ops->rx_descriptor_done)
+ (dev->data->rx_queues[queue_id], offset);
}
#endif
@@ -3204,8 +3201,9 @@ rte_eth_dev_callback_register(uint8_t port_id,
}
/* create a new callback. */
- if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
- sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
+ if (user_cb == NULL &&
+ !(user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+ sizeof(struct rte_eth_dev_callback), 0))) {
user_cb->cb_fn = cb_fn;
user_cb->cb_arg = cb_arg;
user_cb->event = event;
@@ -3293,7 +3291,8 @@ int rte_eth_dev_bypass_init(uint8_t port_id)
return (-ENODEV);
}
- if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ dev = &rte_eth_devices[port_id];
+ if (!dev) {
PMD_DEBUG_TRACE("Invalid port device\n");
return (-ENODEV);
}
@@ -3313,7 +3312,8 @@ rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
return (-ENODEV);
}
- if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ dev = &rte_eth_devices[port_id];
+ if (!dev) {
PMD_DEBUG_TRACE("Invalid port device\n");
return (-ENODEV);
}
@@ -3332,7 +3332,8 @@ rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
return (-ENODEV);
}
- if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ dev = &rte_eth_devices[port_id];
+ if (!dev) {
PMD_DEBUG_TRACE("Invalid port device\n");
return (-ENODEV);
}
@@ -3352,7 +3353,8 @@ rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
return (-ENODEV);
}
- if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ dev = &rte_eth_devices[port_id];
+ if (!dev) {
PMD_DEBUG_TRACE("Invalid port device\n");
return (-ENODEV);
}
@@ -3372,7 +3374,8 @@ rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
return (-ENODEV);
}
- if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ dev = &rte_eth_devices[port_id];
+ if (!dev) {
PMD_DEBUG_TRACE("Invalid port device\n");
return (-ENODEV);
}
@@ -3392,7 +3395,8 @@ rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
return (-ENODEV);
}
- if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ dev = &rte_eth_devices[port_id];
+ if (!dev) {
PMD_DEBUG_TRACE("Invalid port device\n");
return (-ENODEV);
}
@@ -3412,7 +3416,8 @@ rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
return (-ENODEV);
}
- if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ dev = &rte_eth_devices[port_id];
+ if (!dev) {
PMD_DEBUG_TRACE("Invalid port device\n");
return (-ENODEV);
}
@@ -3432,7 +3437,8 @@ rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
return (-ENODEV);
}
- if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ dev = &rte_eth_devices[port_id];
+ if (!dev) {
PMD_DEBUG_TRACE("Invalid port device\n");
return (-ENODEV);
}
@@ -3452,7 +3458,8 @@ rte_eth_dev_bypass_wd_reset(uint8_t port_id)
return (-ENODEV);
}
- if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ dev = &rte_eth_devices[port_id];
+ if (!dev) {
PMD_DEBUG_TRACE("Invalid port device\n");
return (-ENODEV);
}
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index e8df027..47a60e5 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -229,7 +229,7 @@ struct rte_eth_link {
uint16_t link_speed; /**< ETH_LINK_SPEED_[10, 100, 1000, 10000] */
uint16_t link_duplex; /**< ETH_LINK_[HALF_DUPLEX, FULL_DUPLEX] */
uint8_t link_status : 1; /**< 1 -> link up, 0 -> link down */
-}__attribute__((aligned(8))); /**< aligned for atomic64 read/write */
+} __attribute__((aligned(8))); /**< aligned for atomic64 read/write */
#define ETH_LINK_SPEED_AUTONEG 0 /**< Auto-negotiate link speed. */
#define ETH_LINK_SPEED_10 10 /**< 10 megabits/second. */
@@ -726,7 +726,7 @@ enum rte_l4type {
*/
enum rte_iptype {
RTE_FDIR_IPTYPE_IPV4 = 0, /**< IPv4. */
- RTE_FDIR_IPTYPE_IPV6 , /**< IPv6. */
+ RTE_FDIR_IPTYPE_IPV6, /**< IPv6. */
};
/**
@@ -840,7 +840,7 @@ struct rte_eth_conf {
struct rte_eth_rxmode rxmode; /**< Port RX configuration. */
struct rte_eth_txmode txmode; /**< Port TX configuration. */
uint32_t lpbk_mode; /**< Loopback operation mode. By default the value
- is 0, meaning the loopback mode is disabled.
+ is 0, meaning the loopback mode is disabled.
Read the datasheet of given ethernet controller
for details. The possible values of this field
are defined in implementation of each driver. */
@@ -1363,17 +1363,18 @@ struct eth_dev_ops {
reta_update_t reta_update;
/** Query redirection table. */
reta_query_t reta_query;
- /* bypass control */
+
#ifdef RTE_NIC_BYPASS
- bypass_init_t bypass_init;
- bypass_state_set_t bypass_state_set;
- bypass_state_show_t bypass_state_show;
- bypass_event_set_t bypass_event_set;
- bypass_event_show_t bypass_event_show;
- bypass_wd_timeout_set_t bypass_wd_timeout_set;
- bypass_wd_timeout_show_t bypass_wd_timeout_show;
- bypass_ver_show_t bypass_ver_show;
- bypass_wd_reset_t bypass_wd_reset;
+ /* bypass control */
+ bypass_init_t bypass_init;
+ bypass_state_set_t bypass_state_set;
+ bypass_state_show_t bypass_state_show;
+ bypass_event_set_t bypass_event_set;
+ bypass_event_show_t bypass_event_show;
+ bypass_wd_timeout_set_t bypass_wd_timeout_set;
+ bypass_wd_timeout_show_t bypass_wd_timeout_show;
+ bypass_ver_show_t bypass_ver_show;
+ bypass_wd_reset_t bypass_wd_reset;
#endif
/** Configure RSS hash protocols. */
@@ -1529,10 +1530,10 @@ struct rte_eth_dev_data {
/**< Common rx buffer size handled by all queues */
uint64_t rx_mbuf_alloc_failed; /**< RX ring mbuf allocation failures. */
- struct ether_addr* mac_addrs;/**< Device Ethernet Link address. */
+ struct ether_addr *mac_addrs;/**< Device Ethernet Link address. */
uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
/** bitmap array of associating Ethernet MAC addresses to pools */
- struct ether_addr* hash_mac_addrs;
+ struct ether_addr *hash_mac_addrs;
/** Device Ethernet MAC addresses of hash filtering. */
uint8_t port_id; /**< Device [external] port identifier. */
uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
@@ -2228,7 +2229,7 @@ extern int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
* - (-ENOSYS) if VLAN filtering on *port_id* disabled.
* - (-EINVAL) if *vlan_id* > 4095.
*/
-extern int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id , int on);
+extern int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on);
/**
* Enable/Disable hardware VLAN Strip by a rx queue of an Ethernet device.
@@ -2449,10 +2450,10 @@ extern uint32_t rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id);
static inline uint32_t
rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
{
- struct rte_eth_dev *dev;
+ struct rte_eth_dev *dev;
- dev = &rte_eth_devices[port_id];
- return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+ dev = &rte_eth_devices[port_id];
+ return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
#endif
@@ -2481,8 +2482,8 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
struct rte_eth_dev *dev;
dev = &rte_eth_devices[port_id];
- return (*dev->dev_ops->rx_descriptor_done)( \
- dev->data->rx_queues[queue_id], offset);
+ return (*dev->dev_ops->rx_descriptor_done)
+ (dev->data->rx_queues[queue_id], offset);
}
#endif
@@ -2798,8 +2799,8 @@ enum rte_eth_event_type {
RTE_ETH_EVENT_MAX /**< max value of this enum */
};
-typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
- enum rte_eth_event_type event, void *cb_arg);
+typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id,
+ enum rte_eth_event_type event, void *cb_arg);
/**< user application callback to be registered for interrupts */
@@ -3031,8 +3032,8 @@ int rte_eth_dev_rss_reta_query(uint8_t port,
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_uc_hash_table_set(uint8_t port,struct ether_addr *addr,
- uint8_t on);
+int rte_eth_dev_uc_hash_table_set(uint8_t port, struct ether_addr *addr,
+ uint8_t on);
/**
* Updates all unicast hash bitmaps for receiving packet with any Unicast
@@ -3051,7 +3052,7 @@ int rte_eth_dev_uc_hash_table_set(uint8_t port,struct ether_addr *addr,
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_uc_all_hash_table_set(uint8_t port,uint8_t on);
+int rte_eth_dev_uc_all_hash_table_set(uint8_t port, uint8_t on);
/**
* Set RX L2 Filtering mode of a VF of an Ethernet device.
@@ -3095,7 +3096,7 @@ int rte_eth_dev_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mode,
* - (-EINVAL) if bad parameter.
*/
int
-rte_eth_dev_set_vf_tx(uint8_t port,uint16_t vf, uint8_t on);
+rte_eth_dev_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on);
/**
* Enable or disable a VF traffic receive of an Ethernet device.
@@ -3114,7 +3115,7 @@ rte_eth_dev_set_vf_tx(uint8_t port,uint16_t vf, uint8_t on);
* - (-EINVAL) if bad parameter.
*/
int
-rte_eth_dev_set_vf_rx(uint8_t port,uint16_t vf, uint8_t on);
+rte_eth_dev_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on);
/**
* Enable/Disable hardware VF VLAN filtering by an Ethernet device of
--
2.1.4