From: Chaoyong He <chaoyong.he@corigine.com>
To: dev@dpdk.org
Cc: oss-drivers@corigine.com, niklas.soderlund@corigine.com,
Chaoyong He <chaoyong.he@corigine.com>
Subject: [PATCH 7/8] net/nfp: revise cast from void pointer
Date: Fri, 19 May 2023 10:59:49 +0800
Message-ID: <20230519025950.1642943-8-chaoyong.he@corigine.com>
In-Reply-To: <20230519025950.1642943-1-chaoyong.he@corigine.com>
When assigning from a void pointer to another pointer type, there is
no need for an explicit '()' cast, since the conversion is implicit in C.
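For context, a value of type 'void *' converts implicitly to any object
pointer type in C, so the cast only adds noise. A minimal illustration
(hypothetical helper name, not part of this patch):

    void *priv = get_private_data();    /* hypothetical helper, illustration only */
    struct nfp_net_hw *hw;

    hw = priv;                          /* implicit conversion, valid C */
    hw = (struct nfp_net_hw *)priv;     /* equivalent, the cast is redundant */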
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
---
drivers/net/nfp/flower/nfp_flower.c | 20 +++----
.../net/nfp/flower/nfp_flower_representor.c | 30 +++++------
drivers/net/nfp/nfp_common.c | 12 ++---
drivers/net/nfp/nfp_ethdev.c | 2 +-
drivers/net/nfp/nfp_ethdev_vf.c | 2 +-
drivers/net/nfp/nfp_flow.c | 54 +++++++++----------
drivers/net/nfp/nfp_rxtx.c | 2 +-
drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c | 2 +-
8 files changed, 60 insertions(+), 64 deletions(-)
diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 9a08ae3b75..72933e55d0 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -60,7 +60,7 @@ nfp_pf_repr_disable_queues(struct rte_eth_dev *dev)
uint32_t update = 0;
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
hw = repr->app_fw_flower->pf_hw;
nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
@@ -89,7 +89,7 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
struct nfp_net_hw *hw;
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
hw = repr->app_fw_flower->pf_hw;
/* Disabling queues just in case... */
@@ -149,19 +149,19 @@ nfp_flower_pf_stop(struct rte_eth_dev *dev)
struct nfp_net_rxq *this_rx_q;
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
hw = repr->app_fw_flower->pf_hw;
nfp_pf_repr_disable_queues(dev);
/* Clear queues */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
+ this_tx_q = dev->data->tx_queues[i];
nfp_net_reset_tx_queue(this_tx_q);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
+ this_rx_q = dev->data->rx_queues[i];
nfp_net_reset_rx_queue(this_rx_q);
}
@@ -189,7 +189,7 @@ nfp_flower_pf_close(struct rte_eth_dev *dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
hw = repr->app_fw_flower->pf_hw;
pf_dev = hw->pf_dev;
app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);
@@ -204,12 +204,12 @@ nfp_flower_pf_close(struct rte_eth_dev *dev)
/* Clear queues */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
+ this_tx_q = dev->data->tx_queues[i];
nfp_net_reset_tx_queue(this_tx_q);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
+ this_rx_q = dev->data->rx_queues[i];
nfp_net_reset_rx_queue(this_rx_q);
}
@@ -814,7 +814,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
/* Saving physical and virtual addresses for the RX ring */
rxq->dma = (uint64_t)tz->iova;
- rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
+ rxq->rxds = tz->addr;
/* Mbuf pointers array for referencing mbufs linked to RX descriptors */
rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
@@ -877,7 +877,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
/* Saving physical and virtual addresses for the TX ring */
txq->dma = (uint64_t)tz->iova;
- txq->txds = (struct nfp_net_nfd3_tx_desc *)tz->addr;
+ txq->txds = tz->addr;
/* Mbuf pointers array for referencing mbufs linked to TX descriptors */
txq->txbufs = rte_zmalloc_socket("txq->txbufs",
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index 0479eb4792..86b7d1a220 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -32,7 +32,7 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev,
const struct rte_memzone *tz;
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
hw = repr->app_fw_flower->pf_hw;
/* Allocating rx queue data structure */
@@ -78,7 +78,7 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev,
/* Saving physical and virtual addresses for the RX ring */
rxq->dma = (uint64_t)tz->iova;
- rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
+ rxq->rxds = tz->addr;
/* mbuf pointers array for referencing mbufs linked to RX descriptors */
rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
@@ -116,7 +116,7 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev,
const struct rte_memzone *tz;
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
hw = repr->app_fw_flower->pf_hw;
tx_free_thresh = (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
@@ -161,7 +161,7 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev,
/* Saving physical and virtual addresses for the TX ring */
txq->dma = (uint64_t)tz->iova;
- txq->txds = (struct nfp_net_nfd3_tx_desc *)tz->addr;
+ txq->txds = tz->addr;
/* mbuf pointers array for referencing mbufs linked to TX descriptors */
txq->txbufs = rte_zmalloc_socket("txq->txbufs",
@@ -207,7 +207,7 @@ nfp_flower_repr_link_update(struct rte_eth_dev *dev,
[NFP_NET_CFG_STS_LINK_RATE_100G] = RTE_ETH_SPEED_NUM_100G,
};
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
link = &repr->link;
pf_hw = repr->app_fw_flower->pf_hw;
@@ -273,7 +273,7 @@ nfp_flower_repr_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode;
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
pf_hw = repr->app_fw_flower->pf_hw;
dev_conf = &dev->data->dev_conf;
@@ -295,7 +295,7 @@ nfp_flower_repr_dev_start(struct rte_eth_dev *dev)
struct nfp_flower_representor *repr;
struct nfp_app_fw_flower *app_fw_flower;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
app_fw_flower = repr->app_fw_flower;
if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) {
@@ -314,7 +314,7 @@ nfp_flower_repr_dev_stop(struct rte_eth_dev *dev)
struct nfp_flower_representor *repr;
struct nfp_app_fw_flower *app_fw_flower;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
app_fw_flower = repr->app_fw_flower;
nfp_flower_cmsg_port_mod(app_fw_flower, repr->port_id, false);
@@ -339,7 +339,7 @@ nfp_flower_repr_rx_queue_setup(struct rte_eth_dev *dev,
struct nfp_net_hw *pf_hw;
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
pf_hw = repr->app_fw_flower->pf_hw;
/* Allocating rx queue data structure */
@@ -367,7 +367,7 @@ nfp_flower_repr_tx_queue_setup(struct rte_eth_dev *dev,
struct nfp_net_hw *pf_hw;
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
pf_hw = repr->app_fw_flower->pf_hw;
/* Allocating tx queue data structure */
@@ -390,7 +390,7 @@ nfp_flower_repr_stats_get(struct rte_eth_dev *ethdev,
{
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)ethdev->data->dev_private;
+ repr = ethdev->data->dev_private;
rte_memcpy(stats, &repr->repr_stats, sizeof(struct rte_eth_stats));
return 0;
@@ -401,7 +401,7 @@ nfp_flower_repr_stats_reset(struct rte_eth_dev *ethdev)
{
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)ethdev->data->dev_private;
+ repr = ethdev->data->dev_private;
memset(&repr->repr_stats, 0, sizeof(struct rte_eth_stats));
return 0;
@@ -413,7 +413,7 @@ nfp_flower_repr_mac_addr_set(struct rte_eth_dev *ethdev,
{
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)ethdev->data->dev_private;
+ repr = ethdev->data->dev_private;
rte_ether_addr_copy(mac_addr, &repr->mac_addr);
rte_ether_addr_copy(mac_addr, ethdev->data->mac_addrs);
@@ -584,7 +584,7 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev,
struct nfp_flower_representor *init_repr_data;
/* Cast the input representor data to the correct struct here */
- init_repr_data = (struct nfp_flower_representor *)init_params;
+ init_repr_data = init_params;
/* Memory has been allocated in the eth_dev_create() function */
repr = eth_dev->data->dev_private;
@@ -639,7 +639,7 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev,
struct nfp_flower_representor *init_repr_data;
/* Cast the input representor data to the correct struct here */
- init_repr_data = (struct nfp_flower_representor *)init_params;
+ init_repr_data = init_params;
app_fw_flower = init_repr_data->app_fw_flower;
/* Memory has been allocated in the eth_dev_create() function */
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index 634afbf1b5..b7a7296999 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -1404,7 +1404,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
void
nfp_net_dev_interrupt_delayed_handler(void *param)
{
- struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct rte_eth_dev *dev = param;
nfp_net_link_update(dev, 0);
rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
@@ -1420,7 +1420,7 @@ nfp_net_dev_interrupt_handler(void *param)
{
int64_t timeout;
struct rte_eth_link link;
- struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct rte_eth_dev *dev = param;
PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!");
@@ -1840,7 +1840,7 @@ nfp_net_stop_rx_queue(struct rte_eth_dev *dev)
struct nfp_net_rxq *this_rx_q;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
+ this_rx_q = dev->data->rx_queues[i];
nfp_net_reset_rx_queue(this_rx_q);
}
}
@@ -1852,7 +1852,7 @@ nfp_net_close_rx_queue(struct rte_eth_dev *dev)
struct nfp_net_rxq *this_rx_q;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
+ this_rx_q = dev->data->rx_queues[i];
nfp_net_reset_rx_queue(this_rx_q);
nfp_net_rx_queue_release(dev, i);
}
@@ -1865,7 +1865,7 @@ nfp_net_stop_tx_queue(struct rte_eth_dev *dev)
struct nfp_net_txq *this_tx_q;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
+ this_tx_q = dev->data->tx_queues[i];
nfp_net_reset_tx_queue(this_tx_q);
}
}
@@ -1877,7 +1877,7 @@ nfp_net_close_tx_queue(struct rte_eth_dev *dev)
struct nfp_net_txq *this_tx_q;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
+ this_tx_q = dev->data->tx_queues[i];
nfp_net_reset_tx_queue(this_tx_q);
nfp_net_tx_queue_release(dev, i);
}
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 3a56726388..06146050ff 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -542,7 +542,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function);
- hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
+ hw->ctrl_bar = pci_dev->mem_resource[0].addr;
if (hw->ctrl_bar == NULL) {
PMD_DRV_LOG(ERR,
"hw->ctrl_bar is NULL. BAR0 not configured");
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index f8135fa0c8..bac8df6142 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -289,7 +289,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
+ hw->ctrl_bar = pci_dev->mem_resource[0].addr;
if (hw->ctrl_bar == NULL) {
PMD_DRV_LOG(ERR,
"hw->ctrl_bar is NULL. BAR0 not configured");
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index fdf5c0f40e..b46acf6a39 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -93,7 +93,7 @@ nfp_flow_dev_to_priv(struct rte_eth_dev *dev)
{
struct nfp_flower_representor *repr;
- repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ repr = dev->data->dev_private;
return repr->app_fw_flower->flow_priv;
}
@@ -726,8 +726,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
if (port_id->id >= RTE_MAX_ETHPORTS)
return -ERANGE;
ethdev = &rte_eth_devices[port_id->id];
- representor = (struct nfp_flower_representor *)
- ethdev->data->dev_private;
+ representor = ethdev->data->dev_private;
key_ls->port = rte_cpu_to_be_32(representor->port_id);
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
@@ -2047,7 +2046,7 @@ nfp_flow_action_output(char *act_data,
return -ERANGE;
ethdev = &rte_eth_devices[port_id->id];
- representor = (struct nfp_flower_representor *)ethdev->data->dev_private;
+ representor = ethdev->data->dev_private;
act_size = sizeof(struct nfp_fl_act_output);
output = (struct nfp_fl_act_output *)act_data;
@@ -2083,7 +2082,7 @@ nfp_flow_action_set_mac(char *act_data,
set_eth->head.len_lw = act_size >> NFP_FL_LW_SIZ;
set_eth->reserved = 0;
- set_mac = (const struct rte_flow_action_set_mac *)action->conf;
+ set_mac = action->conf;
if (mac_src_flag) {
rte_memcpy(&set_eth->eth_addr[RTE_ETHER_ADDR_LEN],
set_mac->mac_addr, RTE_ETHER_ADDR_LEN);
@@ -2133,7 +2132,7 @@ nfp_flow_action_set_ip(char *act_data,
set_ip->head.len_lw = act_size >> NFP_FL_LW_SIZ;
set_ip->reserved = 0;
- set_ipv4 = (const struct rte_flow_action_set_ipv4 *)action->conf;
+ set_ipv4 = action->conf;
if (ip_src_flag) {
set_ip->ipv4_src = set_ipv4->ipv4_addr;
set_ip->ipv4_src_mask = RTE_BE32(0xffffffff);
@@ -2154,7 +2153,7 @@ nfp_flow_action_set_ipv6(char *act_data,
const struct rte_flow_action_set_ipv6 *set_ipv6;
set_ip = (struct nfp_fl_act_set_ipv6_addr *)act_data;
- set_ipv6 = (const struct rte_flow_action_set_ipv6 *)action->conf;
+ set_ipv6 = action->conf;
if (ip_src_flag)
set_ip->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_SRC;
@@ -2191,7 +2190,7 @@ nfp_flow_action_set_tp(char *act_data,
set_tp->head.len_lw = act_size >> NFP_FL_LW_SIZ;
set_tp->reserved = 0;
- set_tp_conf = (const struct rte_flow_action_set_tp *)action->conf;
+ set_tp_conf = action->conf;
if (tp_src_flag) {
set_tp->src_port = set_tp_conf->port;
set_tp->src_port_mask = RTE_BE16(0xffff);
@@ -2223,12 +2222,9 @@ nfp_flow_action_push_vlan(char *act_data,
push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
push_vlan->reserved = 0;
- push_vlan_conf = (const struct rte_flow_action_of_push_vlan *)
- action->conf;
- vlan_pcp_conf = (const struct rte_flow_action_of_set_vlan_pcp *)
- (action + 1)->conf;
- vlan_vid_conf = (const struct rte_flow_action_of_set_vlan_vid *)
- (action + 2)->conf;
+ push_vlan_conf = action->conf;
+ vlan_pcp_conf = (action + 1)->conf;
+ vlan_vid_conf = (action + 2)->conf;
vid = rte_be_to_cpu_16(vlan_vid_conf->vlan_vid) & 0x0fff;
pcp = vlan_pcp_conf->vlan_pcp & 0x07;
@@ -2256,7 +2252,7 @@ nfp_flow_action_set_ttl(char *act_data,
ttl_tos->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
ttl_tos->head.len_lw = act_size >> NFP_FL_LW_SIZ;
- ttl_conf = (const struct rte_flow_action_set_ttl *)action->conf;
+ ttl_conf = action->conf;
ttl_tos->ipv4_ttl = ttl_conf->ttl_value;
ttl_tos->ipv4_ttl_mask = 0xff;
ttl_tos->reserved = 0;
@@ -2280,7 +2276,7 @@ nfp_flow_action_set_hl(char *act_data,
tc_hl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
tc_hl->head.len_lw = act_size >> NFP_FL_LW_SIZ;
- ttl_conf = (const struct rte_flow_action_set_ttl *)action->conf;
+ ttl_conf = action->conf;
tc_hl->ipv6_hop_limit = ttl_conf->ttl_value;
tc_hl->ipv6_hop_limit_mask = 0xff;
tc_hl->reserved = 0;
@@ -2304,7 +2300,7 @@ nfp_flow_action_set_tos(char *act_data,
ttl_tos->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
ttl_tos->head.len_lw = act_size >> NFP_FL_LW_SIZ;
- tos_conf = (const struct rte_flow_action_set_dscp *)action->conf;
+ tos_conf = action->conf;
ttl_tos->ipv4_tos = tos_conf->dscp;
ttl_tos->ipv4_tos_mask = 0xff;
ttl_tos->reserved = 0;
@@ -2328,7 +2324,7 @@ nfp_flow_action_set_tc(char *act_data,
tc_hl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
tc_hl->head.len_lw = act_size >> NFP_FL_LW_SIZ;
- tos_conf = (const struct rte_flow_action_set_dscp *)action->conf;
+ tos_conf = action->conf;
tc_hl->ipv6_tc = tos_conf->dscp;
tc_hl->ipv6_tc_mask = 0xff;
tc_hl->reserved = 0;
@@ -2719,9 +2715,9 @@ nfp_flow_action_vxlan_encap_v4(struct nfp_app_fw_flower *app_fw_flower,
size_t act_pre_size = sizeof(struct nfp_fl_act_pre_tun);
size_t act_set_size = sizeof(struct nfp_fl_act_set_tun);
- eth = (const struct rte_flow_item_eth *)vxlan_data->items[0].spec;
- ipv4 = (const struct rte_flow_item_ipv4 *)vxlan_data->items[1].spec;
- vxlan = (const struct rte_flow_item_vxlan *)vxlan_data->items[3].spec;
+ eth = vxlan_data->items[0].spec;
+ ipv4 = vxlan_data->items[1].spec;
+ vxlan = vxlan_data->items[3].spec;
pre_tun = (struct nfp_fl_act_pre_tun *)actions;
memset(pre_tun, 0, act_pre_size);
@@ -2756,9 +2752,9 @@ nfp_flow_action_vxlan_encap_v6(struct nfp_app_fw_flower *app_fw_flower,
size_t act_pre_size = sizeof(struct nfp_fl_act_pre_tun);
size_t act_set_size = sizeof(struct nfp_fl_act_set_tun);
- eth = (const struct rte_flow_item_eth *)vxlan_data->items[0].spec;
- ipv6 = (const struct rte_flow_item_ipv6 *)vxlan_data->items[1].spec;
- vxlan = (const struct rte_flow_item_vxlan *)vxlan_data->items[3].spec;
+ eth = vxlan_data->items[0].spec;
+ ipv6 = vxlan_data->items[1].spec;
+ vxlan = vxlan_data->items[3].spec;
pre_tun = (struct nfp_fl_act_pre_tun *)actions;
memset(pre_tun, 0, act_pre_size);
@@ -3626,7 +3622,7 @@ nfp_flow_process(struct nfp_flower_representor *representor,
nfp_flower_update_meta_tci(nfp_flow->payload.unmasked_data, new_mask_id);
/* Calculate and store the hash_key for later use */
- hash_data = (char *)(nfp_flow->payload.unmasked_data);
+ hash_data = nfp_flow->payload.unmasked_data;
nfp_flow->hash_key = rte_jhash(hash_data, nfp_flow->length, priv->hash_seed);
/* Find the flow in hash table */
@@ -3716,7 +3712,7 @@ nfp_flow_validate(struct rte_eth_dev *dev,
struct nfp_flow_priv *priv;
struct nfp_flower_representor *representor;
- representor = (struct nfp_flower_representor *)dev->data->dev_private;
+ representor = dev->data->dev_private;
priv = representor->app_fw_flower->flow_priv;
nfp_flow = nfp_flow_setup(representor, attr, items, actions, error, true);
@@ -3751,7 +3747,7 @@ nfp_flow_create(struct rte_eth_dev *dev,
struct nfp_app_fw_flower *app_fw_flower;
struct nfp_flower_representor *representor;
- representor = (struct nfp_flower_representor *)dev->data->dev_private;
+ representor = dev->data->dev_private;
app_fw_flower = representor->app_fw_flower;
priv = app_fw_flower->flow_priv;
@@ -3813,7 +3809,7 @@ nfp_flow_destroy(struct rte_eth_dev *dev,
struct nfp_app_fw_flower *app_fw_flower;
struct nfp_flower_representor *representor;
- representor = (struct nfp_flower_representor *)dev->data->dev_private;
+ representor = dev->data->dev_private;
app_fw_flower = representor->app_fw_flower;
priv = app_fw_flower->flow_priv;
@@ -3949,7 +3945,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev,
return;
}
- query = (struct rte_flow_query_count *)data;
+ query = data;
reset = query->reset;
memset(query, 0, sizeof(*query));
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index 2409f63205..3c78557221 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -630,7 +630,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
/* Saving physical and virtual addresses for the RX ring */
rxq->dma = (uint64_t)tz->iova;
- rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
+ rxq->rxds = tz->addr;
/* mbuf pointers array for referencing mbufs linked to RX descriptors */
rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
diff --git a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
index edf4088747..9d63e0ee73 100644
--- a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
+++ b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
@@ -835,7 +835,7 @@ nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev)
if (nfp6000_set_barsz(dev, desc) < 0)
goto error;
- desc->cfg = (char *)dev->mem_resource[0].addr;
+ desc->cfg = dev->mem_resource[0].addr;
nfp_enable_bars(desc);
--
2.39.1
Thread overview: 10+ messages
2023-05-19 2:59 [PATCH 0/8] make the logic conform the coding style of DPDK Chaoyong He
2023-05-19 2:59 ` [PATCH 1/8] net/nfp: reuse the ring buffer struct Chaoyong He
2023-05-19 2:59 ` [PATCH 2/8] net/nfp: modify the rxq struct Chaoyong He
2023-05-19 2:59 ` [PATCH 3/8] net/nfp: modify the Rx descriptor struct Chaoyong He
2023-05-19 2:59 ` [PATCH 4/8] net/nfp: modify the txq struct Chaoyong He
2023-05-19 2:59 ` [PATCH 5/8] net/nfp: remove the custom round macro Chaoyong He
2023-05-19 2:59 ` [PATCH 6/8] net/nfp: remove the unneeded comment Chaoyong He
2023-05-19 2:59 ` Chaoyong He [this message]
2023-05-19 2:59 ` [PATCH 8/8] net/nfp: revise the logic of MAC address Chaoyong He
2023-05-19 13:55 ` [PATCH 0/8] make the logic conform the coding style of DPDK Ferruh Yigit