From: Chaoyong He <chaoyong.he@corigine.com>
To: dev@dpdk.org
Cc: oss-drivers@corigine.com, Chaoyong He <chaoyong.he@corigine.com>,
Long Wu <long.wu@corigine.com>,
Peng Zhang <peng.zhang@corigine.com>
Subject: [PATCH v2 06/11] net/nfp: standardize the comment style
Date: Thu, 12 Oct 2023 09:26:59 +0800 [thread overview]
Message-ID: <20231012012704.483828-7-chaoyong.he@corigine.com> (raw)
In-Reply-To: <20231012012704.483828-1-chaoyong.he@corigine.com>
Follow the DPDK coding style, use the kdoc comment style.
Also delete some comments which are not valid anymore and add some
comments to help understand the logic.
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
---
drivers/net/nfp/flower/nfp_conntrack.c | 4 +-
drivers/net/nfp/flower/nfp_flower.c | 10 +-
drivers/net/nfp/flower/nfp_flower.h | 28 ++--
drivers/net/nfp/flower/nfp_flower_cmsg.c | 2 +-
drivers/net/nfp/flower/nfp_flower_cmsg.h | 56 +++----
drivers/net/nfp/flower/nfp_flower_ctrl.c | 16 +-
.../net/nfp/flower/nfp_flower_representor.c | 42 +++--
.../net/nfp/flower/nfp_flower_representor.h | 2 +-
drivers/net/nfp/nfd3/nfp_nfd3.h | 33 ++--
drivers/net/nfp/nfd3/nfp_nfd3_dp.c | 24 ++-
drivers/net/nfp/nfdk/nfp_nfdk.h | 41 ++---
drivers/net/nfp/nfdk/nfp_nfdk_dp.c | 8 +-
drivers/net/nfp/nfp_common.c | 152 ++++++++----------
drivers/net/nfp/nfp_common.h | 61 +++----
drivers/net/nfp/nfp_cpp_bridge.c | 6 +-
drivers/net/nfp/nfp_ctrl.h | 34 ++--
drivers/net/nfp/nfp_ethdev.c | 40 +++--
drivers/net/nfp/nfp_ethdev_vf.c | 15 +-
drivers/net/nfp/nfp_flow.c | 62 +++----
drivers/net/nfp/nfp_flow.h | 10 +-
drivers/net/nfp/nfp_ipsec.h | 12 +-
drivers/net/nfp/nfp_rxtx.c | 125 ++++++--------
drivers/net/nfp/nfp_rxtx.h | 18 +--
23 files changed, 354 insertions(+), 447 deletions(-)
diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c
index 7b84b12546..f89003be8b 100644
--- a/drivers/net/nfp/flower/nfp_conntrack.c
+++ b/drivers/net/nfp/flower/nfp_conntrack.c
@@ -667,8 +667,8 @@ nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze,
{
bool ret;
uint8_t loop;
- uint8_t item_cnt = 1; /* the RTE_FLOW_ITEM_TYPE_END */
- uint8_t action_cnt = 1; /* the RTE_FLOW_ACTION_TYPE_END */
+ uint8_t item_cnt = 1; /* The RTE_FLOW_ITEM_TYPE_END */
+ uint8_t action_cnt = 1; /* The RTE_FLOW_ACTION_TYPE_END */
struct nfp_flow_priv *priv;
struct nfp_ct_map_entry *me;
struct nfp_ct_flow_entry *fe;
diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 7a4e671178..4453ae7b5e 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -208,7 +208,7 @@ nfp_flower_pf_close(struct rte_eth_dev *dev)
nfp_net_reset_rx_queue(this_rx_q);
}
- /* Cancel possible impending LSC work here before releasing the port*/
+ /* Cancel possible impending LSC work here before releasing the port */
rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
@@ -488,7 +488,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
/*
* Tracking mbuf size for detecting a potential mbuf overflow due to
- * RX offset
+ * RX offset.
*/
rxq->mem_pool = mp;
rxq->mbuf_size = rxq->mem_pool->elt_size;
@@ -535,7 +535,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
/*
* Telling the HW about the physical address of the RX ring and number
- * of descriptors in log2 format
+ * of descriptors in log2 format.
*/
nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(i), rxq->dma);
nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC));
@@ -600,7 +600,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
/*
* Telling the HW about the physical address of the TX ring and number
- * of descriptors in log2 format
+ * of descriptors in log2 format.
*/
nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(i), txq->dma);
nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC));
@@ -758,7 +758,7 @@ nfp_flower_enable_services(struct nfp_app_fw_flower *app_fw_flower)
app_fw_flower->ctrl_vnic_id = service_id;
PMD_INIT_LOG(INFO, "%s registered", flower_service.name);
- /* Map them to available service cores*/
+ /* Map them to available service cores */
ret = nfp_map_service(service_id);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Could not map %s", flower_service.name);
diff --git a/drivers/net/nfp/flower/nfp_flower.h b/drivers/net/nfp/flower/nfp_flower.h
index 244b6daa37..0b4e38cedd 100644
--- a/drivers/net/nfp/flower/nfp_flower.h
+++ b/drivers/net/nfp/flower/nfp_flower.h
@@ -53,49 +53,49 @@ struct nfp_flower_nfd_func {
/* The flower application's private structure */
struct nfp_app_fw_flower {
- /* switch domain for this app */
+ /** Switch domain for this app */
uint16_t switch_domain_id;
- /* Number of VF representors */
+ /** Number of VF representors */
uint8_t num_vf_reprs;
- /* Number of phyport representors */
+ /** Number of phyport representors */
uint8_t num_phyport_reprs;
- /* Pointer to the PF vNIC */
+ /** Pointer to the PF vNIC */
struct nfp_net_hw *pf_hw;
- /* Pointer to a mempool for the ctrlvNIC */
+ /** Pointer to a mempool for the Ctrl vNIC */
struct rte_mempool *ctrl_pktmbuf_pool;
- /* Pointer to the ctrl vNIC */
+ /** Pointer to the ctrl vNIC */
struct nfp_net_hw *ctrl_hw;
- /* Ctrl vNIC Rx counter */
+ /** Ctrl vNIC Rx counter */
uint64_t ctrl_vnic_rx_count;
- /* Ctrl vNIC Tx counter */
+ /** Ctrl vNIC Tx counter */
uint64_t ctrl_vnic_tx_count;
- /* Array of phyport representors */
+ /** Array of phyport representors */
struct nfp_flower_representor *phy_reprs[MAX_FLOWER_PHYPORTS];
- /* Array of VF representors */
+ /** Array of VF representors */
struct nfp_flower_representor *vf_reprs[MAX_FLOWER_VFS];
- /* PF representor */
+ /** PF representor */
struct nfp_flower_representor *pf_repr;
- /* service id of ctrl vnic service */
+ /** Service id of Ctrl vNIC service */
uint32_t ctrl_vnic_id;
- /* Flower extra features */
+ /** Flower extra features */
uint64_t ext_features;
struct nfp_flow_priv *flow_priv;
struct nfp_mtr_priv *mtr_priv;
- /* Function pointers for different NFD version */
+ /** Function pointers for different NFD version */
struct nfp_flower_nfd_func nfd_func;
};
diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.c b/drivers/net/nfp/flower/nfp_flower_cmsg.c
index 5d6912b079..2ec9498d22 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.c
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.c
@@ -230,7 +230,7 @@ nfp_flower_cmsg_flow_add(struct nfp_app_fw_flower *app_fw_flower,
return -ENOMEM;
}
- /* copy the flow to mbuf */
+ /* Copy the flow to mbuf */
nfp_flow_meta = flow->payload.meta;
msg_len = (nfp_flow_meta->key_len + nfp_flow_meta->mask_len +
nfp_flow_meta->act_len) << NFP_FL_LW_SIZ;
diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.h b/drivers/net/nfp/flower/nfp_flower_cmsg.h
index 9449760145..cb019171b6 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.h
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.h
@@ -348,7 +348,7 @@ struct nfp_flower_stats_frame {
rte_be64_t stats_cookie;
};
-/**
+/*
* See RFC 2698 for more details.
* Word[0](Flag options):
* [15] p(pps) 1 for pps, 0 for bps
@@ -378,40 +378,24 @@ struct nfp_cfg_head {
rte_be32_t profile_id;
};
-/**
- * Struct nfp_profile_conf - profile config, offload to NIC
- * @head: config head information
- * @bkt_tkn_p: token bucket peak
- * @bkt_tkn_c: token bucket committed
- * @pbs: peak burst size
- * @cbs: committed burst size
- * @pir: peak information rate
- * @cir: committed information rate
- */
+/* Profile config, offload to NIC */
struct nfp_profile_conf {
- struct nfp_cfg_head head;
- rte_be32_t bkt_tkn_p;
- rte_be32_t bkt_tkn_c;
- rte_be32_t pbs;
- rte_be32_t cbs;
- rte_be32_t pir;
- rte_be32_t cir;
-};
-
-/**
- * Struct nfp_mtr_stats_reply - meter stats, read from firmware
- * @head: config head information
- * @pass_bytes: count of passed bytes
- * @pass_pkts: count of passed packets
- * @drop_bytes: count of dropped bytes
- * @drop_pkts: count of dropped packets
- */
+ struct nfp_cfg_head head; /**< Config head information */
+ rte_be32_t bkt_tkn_p; /**< Token bucket peak */
+ rte_be32_t bkt_tkn_c; /**< Token bucket committed */
+ rte_be32_t pbs; /**< Peak burst size */
+ rte_be32_t cbs; /**< Committed burst size */
+ rte_be32_t pir; /**< Peak information rate */
+ rte_be32_t cir; /**< Committed information rate */
+};
+
+/* Meter stats, read from firmware */
struct nfp_mtr_stats_reply {
- struct nfp_cfg_head head;
- rte_be64_t pass_bytes;
- rte_be64_t pass_pkts;
- rte_be64_t drop_bytes;
- rte_be64_t drop_pkts;
+ struct nfp_cfg_head head; /**< Config head information */
+ rte_be64_t pass_bytes; /**< Count of passed bytes */
+ rte_be64_t pass_pkts; /**< Count of passed packets */
+ rte_be64_t drop_bytes; /**< Count of dropped bytes */
+ rte_be64_t drop_pkts; /**< Count of dropped packets */
};
enum nfp_flower_cmsg_port_type {
@@ -851,7 +835,7 @@ struct nfp_fl_act_set_ipv6_addr {
};
/*
- * ipv6 tc hl fl
+ * IPv6 tc hl fl
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -954,9 +938,9 @@ struct nfp_fl_act_set_tun {
uint8_t tos;
rte_be16_t outer_vlan_tpid;
rte_be16_t outer_vlan_tci;
- uint8_t tun_len; /* Only valid for NFP_FL_TUNNEL_GENEVE */
+ uint8_t tun_len; /**< Only valid for NFP_FL_TUNNEL_GENEVE */
uint8_t reserved2;
- rte_be16_t tun_proto; /* Only valid for NFP_FL_TUNNEL_GENEVE */
+ rte_be16_t tun_proto; /**< Only valid for NFP_FL_TUNNEL_GENEVE */
} __rte_packed;
/*
diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c b/drivers/net/nfp/flower/nfp_flower_ctrl.c
index 1f4c5fd7f9..15d27f2ac7 100644
--- a/drivers/net/nfp/flower/nfp_flower_ctrl.c
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c
@@ -34,7 +34,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
if (unlikely(rxq == NULL)) {
/*
* DPDK just checks the queue is lower than max queues
- * enabled. But the queue needs to be configured
+ * enabled. But the queue needs to be configured.
*/
PMD_RX_LOG(ERR, "RX Bad queue");
return 0;
@@ -60,7 +60,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
/*
* We got a packet. Let's alloc a new mbuf for refilling the
- * free descriptor ring as soon as possible
+ * free descriptor ring as soon as possible.
*/
new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
if (unlikely(new_mb == NULL)) {
@@ -72,7 +72,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
/*
* Grab the mbuf and refill the descriptor with the
- * previously allocated mbuf
+ * previously allocated mbuf.
*/
mb = rxb->mbuf;
rxb->mbuf = new_mb;
@@ -86,7 +86,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
/*
* This should not happen and the user has the
* responsibility of avoiding it. But we have
- * to give some info about the error
+ * to give some info about the error.
*/
PMD_RX_LOG(ERR, "mbuf overflow likely due to the RX offset.\n"
"\t\tYour mbuf size should have extra space for"
@@ -123,7 +123,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
nb_hold++;
rxq->rd_p++;
- if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
+ if (unlikely(rxq->rd_p == rxq->rx_count)) /* Wrapping */
rxq->rd_p = 0;
}
@@ -170,7 +170,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower *app_fw_flower,
if (unlikely(txq == NULL)) {
/*
* DPDK just checks the queue is lower than max queues
- * enabled. But the queue needs to be configured
+ * enabled. But the queue needs to be configured.
*/
PMD_TX_LOG(ERR, "ctrl dev TX Bad queue");
goto xmit_end;
@@ -206,7 +206,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower *app_fw_flower,
txds->offset_eop = FLOWER_PKT_DATA_OFFSET | NFD3_DESC_TX_EOP;
txq->wr_p++;
- if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
+ if (unlikely(txq->wr_p == txq->tx_count)) /* Wrapping */
txq->wr_p = 0;
cnt++;
@@ -520,7 +520,7 @@ nfp_flower_ctrl_vnic_poll(struct nfp_app_fw_flower *app_fw_flower)
ctrl_hw = app_fw_flower->ctrl_hw;
ctrl_eth_dev = ctrl_hw->eth_dev;
- /* ctrl vNIC only has a single Rx queue */
+ /* Ctrl vNIC only has a single Rx queue */
rxq = ctrl_eth_dev->data->rx_queues[0];
while (rte_service_runstate_get(app_fw_flower->ctrl_vnic_id) != 0) {
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index be0dfb2890..e023a7d8dc 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -10,18 +10,12 @@
#include "../nfp_logs.h"
#include "../nfp_mtr.h"
-/*
- * enum nfp_repr_type - type of representor
- * @NFP_REPR_TYPE_PHYS_PORT: external NIC port
- * @NFP_REPR_TYPE_PF: physical function
- * @NFP_REPR_TYPE_VF: virtual function
- * @NFP_REPR_TYPE_MAX: number of representor types
- */
+/* Type of representor */
enum nfp_repr_type {
- NFP_REPR_TYPE_PHYS_PORT,
- NFP_REPR_TYPE_PF,
- NFP_REPR_TYPE_VF,
- NFP_REPR_TYPE_MAX,
+	NFP_REPR_TYPE_PHYS_PORT, /**< External NIC port */
+	NFP_REPR_TYPE_PF, /**< Physical function */
+	NFP_REPR_TYPE_VF, /**< Virtual function */
+	NFP_REPR_TYPE_MAX, /**< Number of representor types */
};
static int
@@ -55,7 +49,7 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Tracking mbuf size for detecting a potential mbuf overflow due to
- * RX offset
+ * RX offset.
*/
rxq->mem_pool = mp;
rxq->mbuf_size = rxq->mem_pool->elt_size;
@@ -86,7 +80,7 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev,
rxq->dma = (uint64_t)tz->iova;
rxq->rxds = tz->addr;
- /* mbuf pointers array for referencing mbufs linked to RX descriptors */
+ /* Mbuf pointers array for referencing mbufs linked to RX descriptors */
rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
sizeof(*rxq->rxbufs) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
@@ -101,7 +95,7 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Telling the HW about the physical address of the RX ring and number
- * of descriptors in log2 format
+ * of descriptors in log2 format.
*/
nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
@@ -159,7 +153,7 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev,
txq->tx_count = nb_desc;
txq->tx_free_thresh = tx_free_thresh;
- /* queue mapping based on firmware configuration */
+ /* Queue mapping based on firmware configuration */
txq->qidx = queue_idx;
txq->tx_qcidx = queue_idx * hw->stride_tx;
txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
@@ -170,7 +164,7 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev,
txq->dma = (uint64_t)tz->iova;
txq->txds = tz->addr;
- /* mbuf pointers array for referencing mbufs linked to TX descriptors */
+ /* Mbuf pointers array for referencing mbufs linked to TX descriptors */
txq->txbufs = rte_zmalloc_socket("txq->txbufs",
sizeof(*txq->txbufs) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
@@ -185,7 +179,7 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev,
/*
* Telling the HW about the physical address of the TX ring and number
- * of descriptors in log2 format
+ * of descriptors in log2 format.
*/
nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
@@ -603,7 +597,7 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev,
/* Memory has been allocated in the eth_dev_create() function */
repr = eth_dev->data->dev_private;
- /* Copy data here from the input representor template*/
+ /* Copy data here from the input representor template */
repr->vf_id = init_repr_data->vf_id;
repr->switch_domain_id = init_repr_data->switch_domain_id;
repr->repr_type = init_repr_data->repr_type;
@@ -672,7 +666,7 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev,
return -ENOMEM;
}
- /* Copy data here from the input representor template*/
+ /* Copy data here from the input representor template */
repr->vf_id = init_repr_data->vf_id;
repr->switch_domain_id = init_repr_data->switch_domain_id;
repr->port_id = init_repr_data->port_id;
@@ -752,7 +746,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
nfp_eth_table = app_fw_flower->pf_hw->pf_dev->nfp_eth_table;
eth_dev = app_fw_flower->ctrl_hw->eth_dev;
- /* Send a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware*/
+ /* Send a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware */
ret = nfp_flower_cmsg_mac_repr(app_fw_flower);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Cloud not send mac repr cmsgs");
@@ -795,8 +789,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
"%s_repr_p%d", pci_name, i);
/*
- * Create a eth_dev for this representor
- * This will also allocate private memory for the device
+ * Create a eth_dev for this representor.
+ * This will also allocate private memory for the device.
*/
ret = rte_eth_dev_create(eth_dev->device, flower_repr.name,
sizeof(struct nfp_flower_representor),
@@ -812,7 +806,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
/*
* Now allocate eth_dev's for VF representors.
- * Also send reify messages
+ * Also send reify messages.
*/
for (i = 0; i < app_fw_flower->num_vf_reprs; i++) {
flower_repr.repr_type = NFP_REPR_TYPE_VF;
@@ -826,7 +820,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
snprintf(flower_repr.name, sizeof(flower_repr.name),
"%s_repr_vf%d", pci_name, i);
- /* This will also allocate private memory for the device*/
+ /* This will also allocate private memory for the device */
ret = rte_eth_dev_create(eth_dev->device, flower_repr.name,
sizeof(struct nfp_flower_representor),
NULL, NULL, nfp_flower_repr_init, &flower_repr);
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.h b/drivers/net/nfp/flower/nfp_flower_representor.h
index 5ac5e38186..eda19cbb16 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.h
+++ b/drivers/net/nfp/flower/nfp_flower_representor.h
@@ -13,7 +13,7 @@ struct nfp_flower_representor {
uint16_t switch_domain_id;
uint32_t repr_type;
uint32_t port_id;
- uint32_t nfp_idx; /* only valid for the repr of physical port */
+ uint32_t nfp_idx; /**< Only valid for the repr of physical port */
char name[RTE_ETH_NAME_MAX_LEN];
struct rte_ether_addr mac_addr;
struct nfp_app_fw_flower *app_fw_flower;
diff --git a/drivers/net/nfp/nfd3/nfp_nfd3.h b/drivers/net/nfp/nfd3/nfp_nfd3.h
index 7c56ca4908..0b0ca361f4 100644
--- a/drivers/net/nfp/nfd3/nfp_nfd3.h
+++ b/drivers/net/nfp/nfd3/nfp_nfd3.h
@@ -17,24 +17,24 @@
struct nfp_net_nfd3_tx_desc {
union {
struct {
- uint8_t dma_addr_hi; /* High bits of host buf address */
- uint16_t dma_len; /* Length to DMA for this desc */
- /* Offset in buf where pkt starts + highest bit is eop flag */
+ uint8_t dma_addr_hi; /**< High bits of host buf address */
+ uint16_t dma_len; /**< Length to DMA for this desc */
+ /** Offset in buf where pkt starts + highest bit is eop flag */
uint8_t offset_eop;
- uint32_t dma_addr_lo; /* Low 32bit of host buf addr */
+ uint32_t dma_addr_lo; /**< Low 32bit of host buf addr */
- uint16_t mss; /* MSS to be used for LSO */
- uint8_t lso_hdrlen; /* LSO, where the data starts */
- uint8_t flags; /* TX Flags, see @NFD3_DESC_TX_* */
+ uint16_t mss; /**< MSS to be used for LSO */
+ uint8_t lso_hdrlen; /**< LSO, where the data starts */
+ uint8_t flags; /**< TX Flags, see @NFD3_DESC_TX_* */
union {
struct {
- uint8_t l3_offset; /* L3 header offset */
- uint8_t l4_offset; /* L4 header offset */
+ uint8_t l3_offset; /**< L3 header offset */
+ uint8_t l4_offset; /**< L4 header offset */
};
- uint16_t vlan; /* VLAN tag to add if indicated */
+ uint16_t vlan; /**< VLAN tag to add if indicated */
};
- uint16_t data_len; /* Length of frame + meta data */
+ uint16_t data_len; /**< Length of frame + meta data */
} __rte_packed;
uint32_t vals[4];
};
@@ -54,13 +54,14 @@ nfp_net_nfd3_free_tx_desc(struct nfp_net_txq *txq)
return (free_desc > 8) ? (free_desc - 8) : 0;
}
-/*
- * nfp_net_nfd3_txq_full() - Check if the TX queue free descriptors
- * is below tx_free_threshold for firmware of nfd3
- *
- * @txq: TX queue to check
+/**
+ * Check if the TX queue free descriptors is below tx_free_threshold
+ * for firmware with nfd3
*
* This function uses the host copy* of read/write pointers.
+ *
+ * @param txq
+ * TX queue to check
*/
static inline bool
nfp_net_nfd3_txq_full(struct nfp_net_txq *txq)
diff --git a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
index 51755f4324..4df2c5d4d2 100644
--- a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
+++ b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
@@ -113,14 +113,12 @@ nfp_flower_nfd3_pkt_add_metadata(struct rte_mbuf *mbuf,
}
/*
- * nfp_net_nfd3_tx_vlan() - Set vlan info in the nfd3 tx desc
+ * Set vlan info in the nfd3 tx desc
*
* If enable NFP_NET_CFG_CTRL_TXVLAN_V2
- * Vlan_info is stored in the meta and
- * is handled in the nfp_net_nfd3_set_meta_vlan()
+ * Vlan_info is stored in the meta and is handled in the @nfp_net_nfd3_set_meta_vlan()
* else if enable NFP_NET_CFG_CTRL_TXVLAN
- * Vlan_info is stored in the tx_desc and
- * is handled in the nfp_net_nfd3_tx_vlan()
+ * Vlan_info is stored in the tx_desc and is handled in the @nfp_net_nfd3_tx_vlan()
*/
static inline void
nfp_net_nfd3_tx_vlan(struct nfp_net_txq *txq,
@@ -299,9 +297,9 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,
nfp_net_nfd3_tx_vlan(txq, &txd, pkt);
/*
- * mbuf data_len is the data in one segment and pkt_len data
+ * Mbuf data_len is the data in one segment and pkt_len data
* in the whole packet. When the packet is just one segment,
- * then data_len = pkt_len
+ * then data_len = pkt_len.
*/
pkt_size = pkt->pkt_len;
@@ -315,7 +313,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,
/*
* Linking mbuf with descriptor for being released
- * next time descriptor is used
+ * next time descriptor is used.
*/
*lmbuf = pkt;
@@ -330,14 +328,14 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,
free_descs--;
txq->wr_p++;
- if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping */
+ if (unlikely(txq->wr_p == txq->tx_count)) /* Wrapping */
txq->wr_p = 0;
pkt_size -= dma_size;
/*
* Making the EOP, packets with just one segment
- * the priority
+ * the priority.
*/
if (likely(pkt_size == 0))
txds->offset_eop = NFD3_DESC_TX_EOP;
@@ -439,7 +437,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
txq->tx_count = nb_desc * NFD3_TX_DESC_PER_PKT;
txq->tx_free_thresh = tx_free_thresh;
- /* queue mapping based on firmware configuration */
+ /* Queue mapping based on firmware configuration */
txq->qidx = queue_idx;
txq->tx_qcidx = queue_idx * hw->stride_tx;
txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
@@ -449,7 +447,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
txq->dma = tz->iova;
txq->txds = tz->addr;
- /* mbuf pointers array for referencing mbufs linked to TX descriptors */
+ /* Mbuf pointers array for referencing mbufs linked to TX descriptors */
txq->txbufs = rte_zmalloc_socket("txq->txbufs",
sizeof(*txq->txbufs) * txq->tx_count,
RTE_CACHE_LINE_SIZE, socket_id);
@@ -465,7 +463,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
/*
* Telling the HW about the physical address of the TX ring and number
- * of descriptors in log2 format
+ * of descriptors in log2 format.
*/
nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count));
diff --git a/drivers/net/nfp/nfdk/nfp_nfdk.h b/drivers/net/nfp/nfdk/nfp_nfdk.h
index 99675b6bd7..04bd3c7600 100644
--- a/drivers/net/nfp/nfdk/nfp_nfdk.h
+++ b/drivers/net/nfp/nfdk/nfp_nfdk.h
@@ -75,7 +75,7 @@
* dma_addr_hi - bits [47:32] of host memory address
* dma_addr_lo - bits [31:0] of host memory address
*
- * --> metadata descriptor
+ * --> Metadata descriptor
* Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
* -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* Word +-------+-----------------------+---------------------+---+-----+
@@ -104,27 +104,27 @@
*/
struct nfp_net_nfdk_tx_desc {
union {
- /* Address descriptor */
+ /** Address descriptor */
struct {
- uint16_t dma_addr_hi; /* High bits of host buf address */
- uint16_t dma_len_type; /* Length to DMA for this desc */
- uint32_t dma_addr_lo; /* Low 32bit of host buf addr */
+ uint16_t dma_addr_hi; /**< High bits of host buf address */
+ uint16_t dma_len_type; /**< Length to DMA for this desc */
+ uint32_t dma_addr_lo; /**< Low 32bit of host buf addr */
};
- /* TSO descriptor */
+ /** TSO descriptor */
struct {
- uint16_t mss; /* MSS to be used for LSO */
- uint8_t lso_hdrlen; /* LSO, TCP payload offset */
- uint8_t lso_totsegs; /* LSO, total segments */
- uint8_t l3_offset; /* L3 header offset */
- uint8_t l4_offset; /* L4 header offset */
- uint16_t lso_meta_res; /* Rsvd bits in TSO metadata */
+ uint16_t mss; /**< MSS to be used for LSO */
+ uint8_t lso_hdrlen; /**< LSO, TCP payload offset */
+ uint8_t lso_totsegs; /**< LSO, total segments */
+ uint8_t l3_offset; /**< L3 header offset */
+ uint8_t l4_offset; /**< L4 header offset */
+ uint16_t lso_meta_res; /**< Rsvd bits in TSO metadata */
};
- /* Metadata descriptor */
+ /** Metadata descriptor */
struct {
- uint8_t flags; /* TX Flags, see @NFDK_DESC_TX_* */
- uint8_t reserved[7]; /* meta byte placeholder */
+ uint8_t flags; /**< TX Flags, see @NFDK_DESC_TX_* */
+			uint8_t reserved[7]; /**< Meta byte placeholder */
};
uint32_t vals[2];
@@ -146,13 +146,14 @@ nfp_net_nfdk_free_tx_desc(struct nfp_net_txq *txq)
(free_desc - NFDK_TX_DESC_STOP_CNT) : 0;
}
-/*
- * nfp_net_nfdk_txq_full() - Check if the TX queue free descriptors
- * is below tx_free_threshold for firmware of nfdk
- *
- * @txq: TX queue to check
+/**
+ * Check if the TX queue free descriptors is below tx_free_threshold
+ * for firmware of nfdk
*
* This function uses the host copy* of read/write pointers.
+ *
+ * @param txq
+ * TX queue to check
*/
static inline bool
nfp_net_nfdk_txq_full(struct nfp_net_txq *txq)
diff --git a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
index dae87ac6df..1289ba1d65 100644
--- a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
+++ b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
@@ -478,7 +478,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
/*
* Free memory prior to re-allocation if needed. This is the case after
- * calling nfp_net_stop
+ * calling nfp_net_stop().
*/
if (dev->data->tx_queues[queue_idx] != NULL) {
PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
@@ -513,7 +513,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
txq->tx_count = nb_desc * NFDK_TX_DESC_PER_SIMPLE_PKT;
txq->tx_free_thresh = tx_free_thresh;
- /* queue mapping based on firmware configuration */
+ /* Queue mapping based on firmware configuration */
txq->qidx = queue_idx;
txq->tx_qcidx = queue_idx * hw->stride_tx;
txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
@@ -523,7 +523,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
txq->dma = tz->iova;
txq->ktxds = tz->addr;
- /* mbuf pointers array for referencing mbufs linked to TX descriptors */
+ /* Mbuf pointers array for referencing mbufs linked to TX descriptors */
txq->txbufs = rte_zmalloc_socket("txq->txbufs",
sizeof(*txq->txbufs) * txq->tx_count,
RTE_CACHE_LINE_SIZE, socket_id);
@@ -539,7 +539,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
/*
* Telling the HW about the physical address of the TX ring and number
- * of descriptors in log2 format
+ * of descriptors in log2 format.
*/
nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count));
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index f48e1930dc..130f004b4d 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -55,7 +55,7 @@ struct nfp_xstat {
}
static const struct nfp_xstat nfp_net_xstats[] = {
- /**
+ /*
* Basic xstats available on both VF and PF.
* Note that in case new statistics of group NFP_XSTAT_GROUP_NET
* are added to this array, they must appear before any statistics
@@ -80,7 +80,7 @@ static const struct nfp_xstat nfp_net_xstats[] = {
NFP_XSTAT_NET("bpf_app2_bytes", APP2_BYTES),
NFP_XSTAT_NET("bpf_app3_pkts", APP3_FRAMES),
NFP_XSTAT_NET("bpf_app3_bytes", APP3_BYTES),
- /**
+ /*
* MAC xstats available only on PF. These statistics are not available for VFs as the
* PF is not initialized when the VF is initialized as it is still bound to the kernel
* driver. As such, the PMD cannot obtain a CPP handle and access the rtsym_table in order
@@ -175,7 +175,7 @@ static void
nfp_net_notify_port_speed(struct nfp_net_hw *hw,
struct rte_eth_link *link)
{
- /**
+ /*
* Read the link status from NFP_NET_CFG_STS. If the link is down
* then write the link speed NFP_NET_CFG_STS_LINK_RATE_UNKNOWN to
* NFP_NET_CFG_STS_NSP_LINK_RATE.
@@ -184,7 +184,7 @@ nfp_net_notify_port_speed(struct nfp_net_hw *hw,
nn_cfg_writew(hw, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
return;
}
- /**
+ /*
* Link is up so write the link speed from the eth_table to
* NFP_NET_CFG_STS_NSP_LINK_RATE.
*/
@@ -214,7 +214,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw,
nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
wait.tv_sec = 0;
- wait.tv_nsec = 1000000;
+ wait.tv_nsec = 1000000; /* 1ms */
PMD_DRV_LOG(DEBUG, "Polling for update ack...");
@@ -253,7 +253,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw,
*
* @return
* - (0) if OK to reconfigure the device.
- * - (EIO) if I/O err and fail to reconfigure the device.
+ * - (-EIO) if I/O err and fail to reconfigure the device.
*/
int
nfp_net_reconfig(struct nfp_net_hw *hw,
@@ -297,7 +297,7 @@ nfp_net_reconfig(struct nfp_net_hw *hw,
*
* @return
* - (0) if OK to reconfigure the device.
- * - (EIO) if I/O err and fail to reconfigure the device.
+ * - (-EIO) if I/O err and fail to reconfigure the device.
*/
int
nfp_net_ext_reconfig(struct nfp_net_hw *hw,
@@ -368,9 +368,15 @@ nfp_net_mbox_reconfig(struct nfp_net_hw *hw,
}
/*
- * Configure an Ethernet device. This function must be invoked first
- * before any other function in the Ethernet API. This function can
- * also be re-invoked when a device is in the stopped state.
+ * Configure an Ethernet device.
+ *
+ * This function must be invoked first before any other function in the Ethernet API.
+ * This function can also be re-invoked when a device is in the stopped state.
+ *
+ * A DPDK app sends info about how many queues to use and how those queues
+ * need to be configured. This is used by the DPDK core and it makes sure no
+ * more queues than those advertised by the driver are requested.
+ * This function is called after that internal process.
*/
int
nfp_net_configure(struct rte_eth_dev *dev)
@@ -382,14 +388,6 @@ nfp_net_configure(struct rte_eth_dev *dev)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- /*
- * A DPDK app sends info about how many queues to use and how
- * those queues need to be configured. This is used by the
- * DPDK core and it makes sure no more queues than those
- * advertised by the driver are requested. This function is
- * called after that internal process
- */
-
dev_conf = &dev->data->dev_conf;
rxmode = &dev_conf->rxmode;
txmode = &dev_conf->txmode;
@@ -557,12 +555,12 @@ nfp_net_set_mac_addr(struct rte_eth_dev *dev,
/* Writing new MAC to the specific port BAR address */
nfp_net_write_mac(hw, (uint8_t *)mac_addr);
- /* Signal the NIC about the change */
update = NFP_NET_CFG_UPDATE_MACADDR;
ctrl = hw->ctrl;
if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)
ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
+ /* Signal the NIC about the change */
if (nfp_net_reconfig(hw, ctrl, update) != 0) {
PMD_DRV_LOG(ERR, "MAC address update failed");
return -EIO;
@@ -588,7 +586,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO");
- /* UIO just supports one queue and no LSC*/
+ /* UIO just supports one queue and no LSC */
nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
if (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0)
return -1;
@@ -597,8 +595,8 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
for (i = 0; i < dev->data->nb_rx_queues; i++) {
/*
* The first msix vector is reserved for non
- * efd interrupts
- */
+ * efd interrupts.
+ */
nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
if (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0)
return -1;
@@ -706,10 +704,6 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
update = NFP_NET_CFG_UPDATE_GEN;
- /*
- * DPDK sets promiscuous mode on just after this call assuming
- * it can not fail ...
- */
ret = nfp_net_reconfig(hw, new_ctrl, update);
if (ret != 0)
return ret;
@@ -737,10 +731,6 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)
new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
update = NFP_NET_CFG_UPDATE_GEN;
- /*
- * DPDK sets promiscuous mode off just before this call
- * assuming it can not fail ...
- */
ret = nfp_net_reconfig(hw, new_ctrl, update);
if (ret != 0)
return ret;
@@ -751,7 +741,7 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)
}
/*
- * return 0 means link status changed, -1 means not changed
+ * Return 0 means link status changed, -1 means not changed
*
* Wait to complete is needed as it can take up to 9 seconds to get the Link
* status.
@@ -793,7 +783,7 @@ nfp_net_link_update(struct rte_eth_dev *dev,
}
}
} else {
- /**
+ /*
* Shift and mask nn_link_status so that it is effectively the value
* at offset NFP_NET_CFG_STS_NSP_LINK_RATE.
*/
@@ -812,7 +802,7 @@ nfp_net_link_update(struct rte_eth_dev *dev,
PMD_DRV_LOG(INFO, "NIC Link is Down");
}
- /**
+ /*
* Notify the port to update the speed value in the CTRL BAR from NSP.
* Not applicable for VFs as the associated PF is still attached to the
* kernel driver.
@@ -833,11 +823,9 @@ nfp_net_stats_get(struct rte_eth_dev *dev,
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
-
memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
- /* reading per RX ring stats */
+ /* Reading per RX ring stats */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
@@ -855,7 +843,7 @@ nfp_net_stats_get(struct rte_eth_dev *dev,
hw->eth_stats_base.q_ibytes[i];
}
- /* reading per TX ring stats */
+ /* Reading per TX ring stats */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
@@ -889,7 +877,7 @@ nfp_net_stats_get(struct rte_eth_dev *dev,
nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
- /* reading general device stats */
+ /* Reading general device stats */
nfp_dev_stats.ierrors =
nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
@@ -915,6 +903,10 @@ nfp_net_stats_get(struct rte_eth_dev *dev,
return -EINVAL;
}
+/*
+ * hw->eth_stats_base records the per counter starting point.
+ * Let's update it now.
+ */
int
nfp_net_stats_reset(struct rte_eth_dev *dev)
{
@@ -923,12 +915,7 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- /*
- * hw->eth_stats_base records the per counter starting point.
- * Lets update it now
- */
-
- /* reading per RX ring stats */
+ /* Reading per RX ring stats */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
@@ -940,7 +927,7 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
}
- /* reading per TX ring stats */
+ /* Reading per TX ring stats */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
@@ -964,7 +951,7 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
hw->eth_stats_base.obytes =
nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
- /* reading general device stats */
+ /* Reading general device stats */
hw->eth_stats_base.ierrors =
nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
@@ -1032,7 +1019,7 @@ nfp_net_xstats_value(const struct rte_eth_dev *dev,
if (raw)
return value;
- /**
+ /*
* A baseline value of each statistic counter is recorded when stats are "reset".
* Thus, the value returned by this function need to be decremented by this
* baseline value. The result is the count of this statistic since the last time
@@ -1041,12 +1028,12 @@ nfp_net_xstats_value(const struct rte_eth_dev *dev,
return value - hw->eth_xstats_base[index].value;
}
+/* NOTE: All callers ensure dev is always set. */
int
nfp_net_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
unsigned int size)
{
- /* NOTE: All callers ensure dev is always set. */
uint32_t id;
uint32_t nfp_size;
uint32_t read_size;
@@ -1066,12 +1053,12 @@ nfp_net_xstats_get_names(struct rte_eth_dev *dev,
return read_size;
}
+/* NOTE: All callers ensure dev is always set. */
int
nfp_net_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats,
unsigned int n)
{
- /* NOTE: All callers ensure dev is always set. */
uint32_t id;
uint32_t nfp_size;
uint32_t read_size;
@@ -1092,16 +1079,16 @@ nfp_net_xstats_get(struct rte_eth_dev *dev,
return read_size;
}
+/*
+ * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev,
+ * ids, xstats_names and size are valid, and non-NULL.
+ */
int
nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev,
const uint64_t *ids,
struct rte_eth_xstat_name *xstats_names,
unsigned int size)
{
- /**
- * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev,
- * ids, xstats_names and size are valid, and non-NULL.
- */
uint32_t i;
uint32_t read_size;
@@ -1123,16 +1110,16 @@ nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev,
return read_size;
}
+/*
+ * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev,
+ * ids, values and n are valid, and non-NULL.
+ */
int
nfp_net_xstats_get_by_id(struct rte_eth_dev *dev,
const uint64_t *ids,
uint64_t *values,
unsigned int n)
{
- /**
- * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev,
- * ids, values and n are valid, and non-NULL.
- */
uint32_t i;
uint32_t read_size;
@@ -1167,10 +1154,7 @@ nfp_net_xstats_reset(struct rte_eth_dev *dev)
hw->eth_xstats_base[id].id = id;
hw->eth_xstats_base[id].value = nfp_net_xstats_value(dev, id, true);
}
- /**
- * Successfully reset xstats, now call function to reset basic stats
- * return value is then based on the success of that function
- */
+ /* Successfully reset xstats, now call function to reset basic stats. */
return nfp_net_stats_reset(dev);
}
@@ -1217,7 +1201,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
- /*
+ /*
* The maximum rx packet length (max_rx_pktlen) is set to the
* maximum supported frame size that the NFP can handle. This
* includes layer 2 headers, CRC and other metadata that can
@@ -1358,7 +1342,7 @@ nfp_net_common_init(struct rte_pci_device *pci_dev,
nfp_net_init_metadata_format(hw);
- /* read the Rx offset configured from firmware */
+ /* Read the Rx offset configured from firmware */
if (hw->ver.major < 2)
hw->rx_offset = NFP_NET_RX_OFFSET;
else
@@ -1375,7 +1359,6 @@ const uint32_t *
nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
- /* refers to nfp_net_set_hash() */
RTE_PTYPE_INNER_L3_IPV4,
RTE_PTYPE_INNER_L3_IPV6,
RTE_PTYPE_INNER_L3_IPV6_EXT,
@@ -1449,10 +1432,8 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
pci_dev->addr.devid, pci_dev->addr.function);
}
-/* Interrupt configuration and handling */
-
/*
- * nfp_net_irq_unmask - Unmask an interrupt
+ * Unmask an interrupt
*
* If MSI-X auto-masking is enabled clear the mask bit, otherwise
* clear the ICR for the entry.
@@ -1478,16 +1459,14 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
}
}
-/*
+/**
* Interrupt handler which shall be registered for alarm callback for delayed
* handling specific interrupt to wait for the stable nic state. As the NIC
* interrupt state is not stable for nfp after link is just down, it needs
* to wait 4 seconds to get the stable status.
*
- * @param handle Pointer to interrupt handle.
- * @param param The address of parameter (struct rte_eth_dev *)
- *
- * @return void
+ * @param param
+ * The address of parameter (struct rte_eth_dev *)
*/
void
nfp_net_dev_interrupt_delayed_handler(void *param)
@@ -1516,13 +1495,12 @@ nfp_net_dev_interrupt_handler(void *param)
nfp_net_link_update(dev, 0);
- /* likely to up */
+ /* Likely to come up */
if (link.link_status == 0) {
- /* handle it 1 sec later, wait it being stable */
+ /* Handle it 1 sec later, wait it being stable */
timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
- /* likely to down */
- } else {
- /* handle it 4 sec later, wait it being stable */
+ } else { /* Likely to go down */
+ /* Handle it 4 sec later, wait it being stable */
timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
}
@@ -1543,7 +1521,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- /* mtu setting is forbidden if port is started */
+ /* MTU setting is forbidden if port is started */
if (dev->data->dev_started) {
PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
dev->data->port_id);
@@ -1557,7 +1535,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
return -ERANGE;
}
- /* writing to configuration space */
+ /* Writing to configuration space */
nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu);
hw->mtu = mtu;
@@ -1634,7 +1612,7 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
/*
* Update Redirection Table. There are 128 8bit-entries which can be
- * manage as 32 32bit-entries
+ * managed as 32 32bit-entries.
*/
for (i = 0; i < reta_size; i += 4) {
/* Handling 4 RSS entries per loop */
@@ -1653,8 +1631,8 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
for (j = 0; j < 4; j++) {
if ((mask & (0x1 << j)) == 0)
continue;
+ /* Clearing the entry bits */
if (mask != 0xF)
- /* Clearing the entry bits */
reta &= ~(0xFF << (8 * j));
reta |= reta_conf[idx].reta[shift + j] << (8 * j);
}
@@ -1689,7 +1667,7 @@ nfp_net_reta_update(struct rte_eth_dev *dev,
return 0;
}
- /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
+/* Query Redirection Table (RETA) of Receive Side Scaling of Ethernet device. */
int
nfp_net_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -1717,7 +1695,7 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
/*
* Reading Redirection Table. There are 128 8bit-entries which can be
- * manage as 32 32bit-entries
+ * managed as 32 32bit-entries.
*/
for (i = 0; i < reta_size; i += 4) {
/* Handling 4 RSS entries per loop */
@@ -1751,7 +1729,7 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- /* Writing the key byte a byte */
+ /* Writing the key byte by byte */
for (i = 0; i < rss_conf->rss_key_len; i++) {
memcpy(&key, &rss_conf->rss_key[i], 1);
nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
@@ -1786,7 +1764,7 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
- /* configuring where to apply the RSS hash */
+ /* Configuring where to apply the RSS hash */
nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
/* Writing the key size */
@@ -1809,7 +1787,7 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
/* Checking if RSS is enabled */
if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
- if (rss_hf != 0) { /* Enable RSS? */
+ if (rss_hf != 0) {
PMD_DRV_LOG(ERR, "RSS unsupported");
return -EINVAL;
}
@@ -2010,7 +1988,7 @@ nfp_net_set_vxlan_port(struct nfp_net_hw *hw,
/*
* The firmware with NFD3 can not handle DMA address requiring more
- * than 40 bits
+ * than 40 bits.
*/
int
nfp_net_check_dma_mask(struct nfp_net_hw *hw,
diff --git a/drivers/net/nfp/nfp_common.h b/drivers/net/nfp/nfp_common.h
index 9cb889c4a6..6a36e2b04c 100644
--- a/drivers/net/nfp/nfp_common.h
+++ b/drivers/net/nfp/nfp_common.h
@@ -53,7 +53,7 @@ enum nfp_app_fw_id {
NFP_APP_FW_FLOWER_NIC = 0x3,
};
-/* nfp_qcp_ptr - Read or Write Pointer of a queue */
+/* Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
NFP_QCP_READ_PTR = 0,
NFP_QCP_WRITE_PTR
@@ -72,15 +72,15 @@ struct nfp_net_tlv_caps {
};
struct nfp_pf_dev {
- /* Backpointer to associated pci device */
+ /** Backpointer to associated pci device */
struct rte_pci_device *pci_dev;
enum nfp_app_fw_id app_fw_id;
- /* Pointer to the app running on the PF */
+ /** Pointer to the app running on the PF */
void *app_fw_priv;
- /* The eth table reported by firmware */
+ /** The eth table reported by firmware */
struct nfp_eth_table *nfp_eth_table;
uint8_t *ctrl_bar;
@@ -94,17 +94,17 @@ struct nfp_pf_dev {
struct nfp_hwinfo *hwinfo;
struct nfp_rtsym_table *sym_tbl;
- /* service id of cpp bridge service */
+ /** Service id of cpp bridge service */
uint32_t cpp_bridge_id;
};
struct nfp_app_fw_nic {
- /* Backpointer to the PF device */
+ /** Backpointer to the PF device */
struct nfp_pf_dev *pf_dev;
- /*
- * Array of physical ports belonging to the this CoreNIC app
- * This is really a list of vNIC's. One for each physical port
+ /**
+ * Array of physical ports belonging to this CoreNIC app.
+ * This is really a list of vNICs, one for each physical port.
*/
struct nfp_net_hw *ports[NFP_MAX_PHYPORTS];
@@ -113,13 +113,13 @@ struct nfp_app_fw_nic {
};
struct nfp_net_hw {
- /* Backpointer to the PF this port belongs to */
+ /** Backpointer to the PF this port belongs to */
struct nfp_pf_dev *pf_dev;
- /* Backpointer to the eth_dev of this port*/
+ /** Backpointer to the eth_dev of this port */
struct rte_eth_dev *eth_dev;
- /* Info from the firmware */
+ /** Info from the firmware */
struct nfp_net_fw_ver ver;
uint32_t cap;
uint32_t max_mtu;
@@ -130,7 +130,7 @@ struct nfp_net_hw {
/** NFP ASIC params */
const struct nfp_dev_info *dev_info;
- /* Current values for control */
+ /** Current values for control */
uint32_t ctrl;
uint8_t *ctrl_bar;
@@ -156,7 +156,7 @@ struct nfp_net_hw {
struct rte_ether_addr mac_addr;
- /* Records starting point for counters */
+ /** Records starting point for counters */
struct rte_eth_stats eth_stats_base;
struct rte_eth_xstat *eth_xstats_base;
@@ -166,9 +166,9 @@ struct nfp_net_hw {
uint8_t *mac_stats_bar;
uint8_t *mac_stats;
- /* Sequential physical port number, only valid for CoreNIC firmware */
+ /** Sequential physical port number, only valid for CoreNIC firmware */
uint8_t idx;
- /* Internal port number as seen from NFP */
+ /** Internal port number as seen from NFP */
uint8_t nfp_idx;
struct nfp_net_tlv_caps tlv_caps;
@@ -240,10 +240,6 @@ nn_writeq(uint64_t val,
nn_writel(val, addr);
}
-/*
- * Functions to read/write from/to Config BAR
- * Performs any endian conversion necessary.
- */
static inline uint8_t
nn_cfg_readb(struct nfp_net_hw *hw,
uint32_t off)
@@ -304,11 +300,15 @@ nn_cfg_writeq(struct nfp_net_hw *hw,
nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}
-/*
- * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
- * @q: Base address for queue structure
- * @ptr: Add to the Read or Write pointer
- * @val: Value to add to the queue pointer
+/**
+ * Add the value to the selected pointer of a queue.
+ *
+ * @param q
+ * Base address for queue structure
+ * @param ptr
+ * Add to the read or write pointer
+ * @param val
+ * Value to add to the queue pointer
*/
static inline void
nfp_qcp_ptr_add(uint8_t *q,
@@ -325,10 +325,13 @@ nfp_qcp_ptr_add(uint8_t *q,
nn_writel(rte_cpu_to_le_32(val), q + off);
}
-/*
- * nfp_qcp_read - Read the current Read/Write pointer value for a queue
- * @q: Base address for queue structure
- * @ptr: Read or Write pointer
+/**
+ * Read the current read/write pointer value for a queue.
+ *
+ * @param q
+ * Base address for queue structure
+ * @param ptr
+ * Read or Write pointer
*/
static inline uint32_t
nfp_qcp_read(uint8_t *q,
diff --git a/drivers/net/nfp/nfp_cpp_bridge.c b/drivers/net/nfp/nfp_cpp_bridge.c
index 222cfdcbc3..8f5271cde9 100644
--- a/drivers/net/nfp/nfp_cpp_bridge.c
+++ b/drivers/net/nfp/nfp_cpp_bridge.c
@@ -1,8 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2014-2021 Netronome Systems, Inc.
* All rights reserved.
- *
- * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
*/
#include "nfp_cpp_bridge.h"
@@ -48,7 +46,7 @@ nfp_map_service(uint32_t service_id)
/*
* Find a service core with the least number of services already
- * registered to it
+ * registered to it.
*/
while (slcore_count--) {
service_count = rte_service_lcore_count_services(slcore_array[slcore_count]);
@@ -100,7 +98,7 @@ nfp_enable_cpp_service(struct nfp_pf_dev *pf_dev)
pf_dev->cpp_bridge_id = service_id;
PMD_INIT_LOG(INFO, "NFP cpp service registered");
- /* Map it to available service core*/
+ /* Map it to available service core */
ret = nfp_map_service(service_id);
if (ret != 0) {
PMD_INIT_LOG(DEBUG, "Could not map nfp cpp service");
diff --git a/drivers/net/nfp/nfp_ctrl.h b/drivers/net/nfp/nfp_ctrl.h
index 55073c3cea..cd0a2f92a8 100644
--- a/drivers/net/nfp/nfp_ctrl.h
+++ b/drivers/net/nfp/nfp_ctrl.h
@@ -20,7 +20,7 @@
/* Offset in Freelist buffer where packet starts on RX */
#define NFP_NET_RX_OFFSET 32
-/* working with metadata api (NFD version > 3.0) */
+/* Working with metadata api (NFD version > 3.0) */
#define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_FIELD_MASK ((1 << NFP_NET_META_FIELD_SIZE) - 1)
#define NFP_NET_META_HEADER_SIZE 4
@@ -36,14 +36,14 @@
NFP_NET_META_VLAN_TPID_MASK)
/* Prepend field types */
-#define NFP_NET_META_HASH 1 /* next field carries hash type */
+#define NFP_NET_META_HASH 1 /* Next field carries hash type */
#define NFP_NET_META_VLAN 4
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_IPSEC 9
#define NFP_META_PORT_ID_CTRL ~0U
-/* Hash type pre-pended when a RSS hash was computed */
+/* Hash type prepended when a RSS hash was computed */
#define NFP_NET_RSS_NONE 0
#define NFP_NET_RSS_IPV4 1
#define NFP_NET_RSS_IPV6 2
@@ -102,7 +102,7 @@
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
-#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
+#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring */
#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */
#define NFP_NET_CFG_CTRL_TXVLAN_V2 (0x1 << 23) /* Enable VLAN insert with metadata */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* Enable VXLAN */
@@ -111,7 +111,7 @@
#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */
#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */
#define NFP_NET_CFG_CTRL_CSUM_COMPLETE (0x1 << 30) /* Checksum complete */
-#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1U << 31)/* live MAC addr change */
+#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1U << 31) /* Live MAC addr change */
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -124,7 +124,7 @@
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
-#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /**< Mailbox update */
+#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */
#define NFP_NET_CFG_UPDATE_ERR (0x1U << 31) /* A error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -205,7 +205,7 @@ struct nfp_net_fw_ver {
* @NFP_NET_CFG_SPARE_ADDR: DMA address for ME code to use (e.g. YDS-155 fix)
*/
#define NFP_NET_CFG_SPARE_ADDR 0x0050
-/**
+/*
* NFP6000/NFP4000 - Prepend configuration
*/
#define NFP_NET_CFG_RX_OFFSET 0x0050
@@ -280,7 +280,7 @@ struct nfp_net_fw_ver {
* @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
* @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
* @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
- * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
+ * @NFP_NET_CFG_TXR_SZ: Per TX ring size (1B entries)
* @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
* @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
* @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries)
@@ -299,7 +299,7 @@ struct nfp_net_fw_ver {
* RX ring configuration (0x0800 - 0x0c00)
* @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
* @NFP_NET_CFG_RXR_ADDR: Per TX ring DMA address (8B entries)
- * @NFP_NET_CFG_RXR_SZ: Per TX ring ring size (1B entries)
+ * @NFP_NET_CFG_RXR_SZ: Per RX ring size (1B entries)
* @NFP_NET_CFG_RXR_VEC: Per TX ring MSI-X table entry (1B entries)
* @NFP_NET_CFG_RXR_PRIO: Per TX ring priority (1B entries)
* @NFP_NET_CFG_RXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries)
@@ -330,7 +330,7 @@ struct nfp_net_fw_ver {
/*
* General device stats (0x0d00 - 0x0d90)
- * all counters are 64bit.
+ * All counters are 64bit.
*/
#define NFP_NET_CFG_STATS_BASE 0x0d00
#define NFP_NET_CFG_STATS_RX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x00)
@@ -364,7 +364,7 @@ struct nfp_net_fw_ver {
/*
* Per ring stats (0x1000 - 0x1800)
- * options, 64bit per entry
+ * Options, 64bit per entry
* @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
* @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
*/
@@ -375,9 +375,9 @@ struct nfp_net_fw_ver {
#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
((_x) * 0x10))
-/**
+/*
* Mac stats (0x0000 - 0x0200)
- * all counters are 64bit.
+ * All counters are 64bit.
*/
#define NFP_MAC_STATS_BASE 0x0000
#define NFP_MAC_STATS_SIZE 0x0200
@@ -558,9 +558,11 @@ struct nfp_net_fw_ver {
int nfp_net_tlv_caps_parse(struct rte_eth_dev *dev);
-/*
- * nfp_net_cfg_ctrl_rss() - Get RSS flag based on firmware's capability
- * @hw_cap: The firmware's capabilities
+/**
+ * Get RSS flag based on firmware's capability
+ *
+ * @param hw_cap
+ * The firmware's capabilities
*/
static inline uint32_t
nfp_net_cfg_ctrl_rss(uint32_t hw_cap)
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 72abc4c16e..1651ac2455 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -66,7 +66,7 @@ nfp_net_start(struct rte_eth_dev *dev)
/* Enabling the required queues in the device */
nfp_net_enable_queues(dev);
- /* check and configure queue intr-vector mapping */
+ /* Check and configure queue intr-vector mapping */
if (dev->data->dev_conf.intr_conf.rxq != 0) {
if (app_fw_nic->multiport) {
PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
@@ -76,7 +76,7 @@ nfp_net_start(struct rte_eth_dev *dev)
if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
/*
* Better not to share LSC with RX interrupts.
- * Unregistering LSC interrupt handler
+ * Unregistering LSC interrupt handler.
*/
rte_intr_callback_unregister(pci_dev->intr_handle,
nfp_net_dev_interrupt_handler, (void *)dev);
@@ -150,7 +150,7 @@ nfp_net_start(struct rte_eth_dev *dev)
/*
* Allocating rte mbufs for configured rx queues.
- * This requires queues being enabled before
+ * This requires queues being enabled beforehand.
*/
if (nfp_net_rx_freelist_setup(dev) != 0) {
ret = -ENOMEM;
@@ -273,11 +273,11 @@ nfp_net_close(struct rte_eth_dev *dev)
/* Clear ipsec */
nfp_ipsec_uninit(dev);
- /* Cancel possible impending LSC work here before releasing the port*/
+ /* Cancel possible impending LSC work here before releasing the port */
rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
/* Only free PF resources after all physical ports have been closed */
- /* Mark this port as unused and free device priv resources*/
+ /* Mark this port as unused and free device priv resources */
nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
app_fw_nic->ports[hw->idx] = NULL;
rte_eth_dev_release_port(dev);
@@ -300,15 +300,10 @@ nfp_net_close(struct rte_eth_dev *dev)
rte_intr_disable(pci_dev->intr_handle);
- /* unregister callback func from eal lib */
+ /* Unregister callback func from eal lib */
rte_intr_callback_unregister(pci_dev->intr_handle,
nfp_net_dev_interrupt_handler, (void *)dev);
- /*
- * The ixgbe PMD disables the pcie master on the
- * device. The i40e does not...
- */
-
return 0;
}
@@ -497,7 +492,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
/*
* Use PF array of physical ports to get pointer to
- * this specific port
+ * this specific port.
*/
hw = app_fw_nic->ports[port];
@@ -779,7 +774,7 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
/*
* For coreNIC the number of vNICs exposed should be the same as the
- * number of physical ports
+ * number of physical ports.
*/
if (total_vnics != nfp_eth_table->count) {
PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
@@ -787,7 +782,7 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
goto app_cleanup;
}
- /* Populate coreNIC app properties*/
+ /* Populate coreNIC app properties */
app_fw_nic->total_phyports = total_vnics;
app_fw_nic->pf_dev = pf_dev;
if (total_vnics > 1)
@@ -842,8 +837,9 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
eth_dev->device = &pf_dev->pci_dev->device;
- /* ctrl/tx/rx BAR mappings and remaining init happens in
- * nfp_net_init
+ /*
+ * Ctrl/tx/rx BAR mappings and remaining init happens in
+ * @nfp_net_init()
*/
ret = nfp_net_init(eth_dev);
if (ret != 0) {
@@ -970,7 +966,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
pf_dev->pci_dev = pci_dev;
pf_dev->nfp_eth_table = nfp_eth_table;
- /* configure access to tx/rx vNIC BARs */
+ /* Configure access to tx/rx vNIC BARs */
addr = nfp_qcp_queue_offset(dev_info, 0);
cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
@@ -986,7 +982,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
/*
* PF initialization has been done at this point. Call app specific
- * init code now
+ * init code now.
*/
switch (pf_dev->app_fw_id) {
case NFP_APP_FW_CORE_NIC:
@@ -1011,7 +1007,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
goto hwqueues_cleanup;
}
- /* register the CPP bridge service here for primary use */
+ /* Register the CPP bridge service here for primary use */
ret = nfp_enable_cpp_service(pf_dev);
if (ret != 0)
PMD_INIT_LOG(INFO, "Enable cpp service failed.");
@@ -1079,7 +1075,7 @@ nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,
/*
* When attaching to the NFP4000/6000 PF on a secondary process there
* is no need to initialise the PF again. Only minimal work is required
- * here
+ * here.
*/
static int
nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
@@ -1119,7 +1115,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
/*
* We don't have access to the PF created in the primary process
- * here so we have to read the number of ports from firmware
+ * here so we have to read the number of ports from firmware.
*/
sym_tbl = nfp_rtsym_table_read(cpp);
if (sym_tbl == NULL) {
@@ -1216,7 +1212,7 @@ nfp_pci_uninit(struct rte_eth_dev *eth_dev)
rte_eth_dev_close(port_id);
/*
* Ports can be closed and freed but hotplugging is not
- * currently supported
+ * currently supported.
*/
return -ENOTSUP;
}
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index d3c3c9e953..c9e72dd953 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -47,12 +47,12 @@ nfp_netvf_start(struct rte_eth_dev *dev)
/* Enabling the required queues in the device */
nfp_net_enable_queues(dev);
- /* check and configure queue intr-vector mapping */
+ /* Check and configure queue intr-vector mapping */
if (dev->data->dev_conf.intr_conf.rxq != 0) {
if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
/*
* Better not to share LSC with RX interrupts.
- * Unregistering LSC interrupt handler
+ * Unregistering LSC interrupt handler.
*/
rte_intr_callback_unregister(pci_dev->intr_handle,
nfp_net_dev_interrupt_handler, (void *)dev);
@@ -101,7 +101,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)
/*
* Allocating rte mbufs for configured rx queues.
- * This requires queues being enabled before
+ * This requires queues being enabled beforehand.
*/
if (nfp_net_rx_freelist_setup(dev) != 0) {
ret = -ENOMEM;
@@ -182,18 +182,13 @@ nfp_netvf_close(struct rte_eth_dev *dev)
rte_intr_disable(pci_dev->intr_handle);
- /* unregister callback func from eal lib */
+ /* Unregister callback func from eal lib */
rte_intr_callback_unregister(pci_dev->intr_handle,
nfp_net_dev_interrupt_handler, (void *)dev);
- /* Cancel possible impending LSC work here before releasing the port*/
+ /* Cancel possible impending LSC work here before releasing the port */
rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
- /*
- * The ixgbe PMD disables the pcie master on the
- * device. The i40e does not...
- */
-
return 0;
}
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index 84b48daf85..fbcdb3d19e 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -108,21 +108,21 @@
#define NVGRE_V4_LEN (sizeof(struct rte_ether_hdr) + \
sizeof(struct rte_ipv4_hdr) + \
sizeof(struct rte_flow_item_gre) + \
- sizeof(rte_be32_t)) /* gre key */
+ sizeof(rte_be32_t)) /* GRE key */
#define NVGRE_V6_LEN (sizeof(struct rte_ether_hdr) + \
sizeof(struct rte_ipv6_hdr) + \
sizeof(struct rte_flow_item_gre) + \
- sizeof(rte_be32_t)) /* gre key */
+ sizeof(rte_be32_t)) /* GRE key */
/* Process structure associated with a flow item */
struct nfp_flow_item_proc {
- /* Bit-mask for fields supported by this PMD. */
+ /** Bit-mask for fields supported by this PMD. */
const void *mask_support;
- /* Bit-mask to use when @p item->mask is not provided. */
+ /** Bit-mask to use when @p item->mask is not provided. */
const void *mask_default;
- /* Size in bytes for @p mask_support and @p mask_default. */
+ /** Size in bytes for @p mask_support and @p mask_default. */
const size_t mask_sz;
- /* Merge a pattern item into a flow rule handle. */
+ /** Merge a pattern item into a flow rule handle. */
int (*merge)(struct nfp_app_fw_flower *app_fw_flower,
struct rte_flow *nfp_flow,
char **mbuf_off,
@@ -130,7 +130,7 @@ struct nfp_flow_item_proc {
const struct nfp_flow_item_proc *proc,
bool is_mask,
bool is_outer_layer);
- /* List of possible subsequent items. */
+ /** List of possible subsequent items. */
const enum rte_flow_item_type *const next_item;
};
@@ -308,12 +308,12 @@ nfp_check_mask_add(struct nfp_flow_priv *priv,
mask_entry = nfp_mask_table_search(priv, mask_data, mask_len);
if (mask_entry == NULL) {
- /* mask entry does not exist, let's create one */
+ /* Mask entry does not exist, let's create one */
ret = nfp_mask_table_add(priv, mask_data, mask_len, mask_id);
if (ret != 0)
return false;
} else {
- /* mask entry already exist */
+ /* Mask entry already exist */
mask_entry->ref_cnt++;
*mask_id = mask_entry->mask_id;
}
@@ -818,7 +818,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
case RTE_FLOW_ITEM_TYPE_ETH:
PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_ETH detected");
/*
- * eth is set with no specific params.
+ * Eth is set with no specific params.
* NFP does not need this.
*/
if (item->spec == NULL)
@@ -879,7 +879,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
key_ls->key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
/*
* The outer l3 layer information is
- * in `struct nfp_flower_ipv4_udp_tun`
+ * in `struct nfp_flower_ipv4_udp_tun`.
*/
key_ls->key_size -= sizeof(struct nfp_flower_ipv4);
} else if (outer_ip6_flag) {
@@ -889,7 +889,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
key_ls->key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
/*
* The outer l3 layer information is
- * in `struct nfp_flower_ipv6_udp_tun`
+ * in `struct nfp_flower_ipv6_udp_tun`.
*/
key_ls->key_size -= sizeof(struct nfp_flower_ipv6);
} else {
@@ -910,7 +910,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
key_ls->key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
/*
* The outer l3 layer information is
- * in `struct nfp_flower_ipv4_udp_tun`
+ * in `struct nfp_flower_ipv4_udp_tun`.
*/
key_ls->key_size -= sizeof(struct nfp_flower_ipv4);
} else if (outer_ip6_flag) {
@@ -918,7 +918,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
key_ls->key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
/*
* The outer l3 layer information is
- * in `struct nfp_flower_ipv6_udp_tun`
+ * in `struct nfp_flower_ipv6_udp_tun`.
*/
key_ls->key_size -= sizeof(struct nfp_flower_ipv6);
} else {
@@ -939,7 +939,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
key_ls->key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
/*
* The outer l3 layer information is
- * in `struct nfp_flower_ipv4_gre_tun`
+ * in `struct nfp_flower_ipv4_gre_tun`.
*/
key_ls->key_size -= sizeof(struct nfp_flower_ipv4);
} else if (outer_ip6_flag) {
@@ -947,7 +947,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
key_ls->key_size += sizeof(struct nfp_flower_ipv6_gre_tun);
/*
* The outer l3 layer information is
- * in `struct nfp_flower_ipv6_gre_tun`
+ * in `struct nfp_flower_ipv6_gre_tun`.
*/
key_ls->key_size -= sizeof(struct nfp_flower_ipv6);
} else {
@@ -1309,8 +1309,8 @@ nfp_flow_merge_ipv4(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
}
/*
- * reserve space for L4 info.
- * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv4
+ * Reserve space for L4 info.
+ * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv4.
*/
if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0)
*mbuf_off += sizeof(struct nfp_flower_tp_ports);
@@ -1392,8 +1392,8 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
}
/*
- * reserve space for L4 info.
- * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv6
+ * Reserve space for L4 info.
+ * rte_flow has ipv6 before L4 but NFP flower fw requires L4 before ipv6.
*/
if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0)
*mbuf_off += sizeof(struct nfp_flower_tp_ports);
@@ -2127,7 +2127,7 @@ nfp_flow_compile_items(struct nfp_flower_representor *representor,
if (nfp_flow_tcp_flag_check(items))
nfp_flow->tcp_flag = true;
- /* Check if this is a tunnel flow and get the inner item*/
+ /* Check if this is a tunnel flow and get the inner item */
is_tun_flow = nfp_flow_inner_item_get(items, &loop_item);
if (is_tun_flow)
is_outer_layer = false;
@@ -3366,9 +3366,9 @@ nfp_flow_action_raw_encap(struct nfp_app_fw_flower *app_fw_flower,
return -EINVAL;
}
- /* Pre_tunnel action must be the first on action list.
- * If other actions already exist, they need to be
- * pushed forward.
+ /*
+ * Pre_tunnel action must be the first on action list.
+ * If other actions already exist, they need to be pushed forward.
*/
act_len = act_data - actions;
if (act_len != 0) {
@@ -4384,7 +4384,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
goto free_mask_id;
}
- /* flow stats */
+ /* Flow stats */
rte_spinlock_init(&priv->stats_lock);
stats_size = (ctx_count & NFP_FL_STAT_ID_STAT) |
((ctx_split - 1) & NFP_FL_STAT_ID_MU_NUM);
@@ -4398,7 +4398,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
goto free_stats_id;
}
- /* mask table */
+ /* Mask table */
mask_hash_params.hash_func_init_val = priv->hash_seed;
priv->mask_table = rte_hash_create(&mask_hash_params);
if (priv->mask_table == NULL) {
@@ -4407,7 +4407,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
goto free_stats;
}
- /* flow table */
+ /* Flow table */
flow_hash_params.hash_func_init_val = priv->hash_seed;
flow_hash_params.entries = ctx_count;
priv->flow_table = rte_hash_create(&flow_hash_params);
@@ -4417,7 +4417,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
goto free_mask_table;
}
- /* pre tunnel table */
+ /* Pre tunnel table */
priv->pre_tun_cnt = 1;
pre_tun_hash_params.hash_func_init_val = priv->hash_seed;
priv->pre_tun_table = rte_hash_create(&pre_tun_hash_params);
@@ -4446,15 +4446,15 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
goto free_ct_zone_table;
}
- /* ipv4 off list */
+ /* IPv4 off list */
rte_spinlock_init(&priv->ipv4_off_lock);
LIST_INIT(&priv->ipv4_off_list);
- /* ipv6 off list */
+ /* IPv6 off list */
rte_spinlock_init(&priv->ipv6_off_lock);
LIST_INIT(&priv->ipv6_off_list);
- /* neighbor next list */
+ /* Neighbor next list */
LIST_INIT(&priv->nn_list);
return 0;
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index ed06eca371..ab38dbe1f4 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -126,19 +126,19 @@ struct nfp_ipv6_addr_entry {
struct nfp_flow_priv {
uint32_t hash_seed; /**< Hash seed for hash tables in this structure. */
uint64_t flower_version; /**< Flow version, always increase. */
- /* mask hash table */
+ /* Mask hash table */
struct nfp_fl_mask_id mask_ids; /**< Entry for mask hash table */
struct rte_hash *mask_table; /**< Hash table to store mask ids. */
- /* flow hash table */
+ /* Flow hash table */
struct rte_hash *flow_table; /**< Hash table to store flow rules. */
- /* flow stats */
+ /* Flow stats */
uint32_t active_mem_unit; /**< The size of active mem units. */
uint32_t total_mem_units; /**< The size of total mem units. */
uint32_t stats_ring_size; /**< The size of stats id ring. */
struct nfp_fl_stats_id stats_ids; /**< The stats id ring. */
struct nfp_fl_stats *stats; /**< Store stats of flow. */
rte_spinlock_t stats_lock; /** < Lock the update of 'stats' field. */
- /* pre tunnel rule */
+ /* Pre tunnel rule */
uint16_t pre_tun_cnt; /**< The size of pre tunnel rule */
uint8_t pre_tun_bitmap[NFP_TUN_PRE_TUN_RULE_LIMIT]; /**< Bitmap of pre tunnel rule */
struct rte_hash *pre_tun_table; /**< Hash table to store pre tunnel rule */
@@ -148,7 +148,7 @@ struct nfp_flow_priv {
/* IPv6 off */
LIST_HEAD(, nfp_ipv6_addr_entry) ipv6_off_list; /**< Store ipv6 off */
rte_spinlock_t ipv6_off_lock; /**< Lock the ipv6 off list */
- /* neighbor next */
+ /* Neighbor next */
LIST_HEAD(, nfp_fl_tun)nn_list; /**< Store nn entry */
/* Conntrack */
struct rte_hash *ct_zone_table; /**< Hash table to store ct zone entry */
diff --git a/drivers/net/nfp/nfp_ipsec.h b/drivers/net/nfp/nfp_ipsec.h
index aaebb80fe1..d7a729398a 100644
--- a/drivers/net/nfp/nfp_ipsec.h
+++ b/drivers/net/nfp/nfp_ipsec.h
@@ -82,7 +82,7 @@ struct ipsec_discard_stats {
uint32_t discards_alignment; /**< Alignment error */
uint32_t discards_hard_bytelimit; /**< Hard byte Count limit */
uint32_t discards_seq_num_wrap; /**< Sequ Number wrap */
- uint32_t discards_pmtu_exceeded; /**< PMTU Limit exceeded*/
+ uint32_t discards_pmtu_exceeded; /**< PMTU Limit exceeded */
uint32_t discards_arw_old_seq; /**< Anti-Replay seq small */
uint32_t discards_arw_replay; /**< Anti-Replay seq rcvd */
uint32_t discards_ctrl_word; /**< Bad SA Control word */
@@ -99,16 +99,16 @@ struct ipsec_discard_stats {
struct ipsec_get_sa_stats {
uint32_t seq_lo; /**< Sequence Number (low 32bits) */
- uint32_t seq_high; /**< Sequence Number (high 32bits)*/
+ uint32_t seq_high; /**< Sequence Number (high 32bits) */
uint32_t arw_counter_lo; /**< Anti-replay wndw cntr */
uint32_t arw_counter_high; /**< Anti-replay wndw cntr */
uint32_t arw_bitmap_lo; /**< Anti-replay wndw bitmap */
uint32_t arw_bitmap_high; /**< Anti-replay wndw bitmap */
uint32_t spare:1;
- uint32_t soft_byte_exceeded :1; /**< Soft lifetime byte cnt exceeded*/
- uint32_t hard_byte_exceeded :1; /**< Hard lifetime byte cnt exceeded*/
- uint32_t soft_time_exceeded :1; /**< Soft lifetime time limit exceeded*/
- uint32_t hard_time_exceeded :1; /**< Hard lifetime time limit exceeded*/
+ uint32_t soft_byte_exceeded :1; /**< Soft lifetime byte cnt exceeded */
+ uint32_t hard_byte_exceeded :1; /**< Hard lifetime byte cnt exceeded */
+ uint32_t soft_time_exceeded :1; /**< Soft lifetime time limit exceeded */
+ uint32_t hard_time_exceeded :1; /**< Hard lifetime time limit exceeded */
uint32_t spare1:27;
uint32_t lifetime_byte_count;
uint32_t pkt_count;
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index 5bfdfd28b3..d506682b56 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -20,43 +20,22 @@
/* Maximum number of supported VLANs in parsed form packet metadata. */
#define NFP_META_MAX_VLANS 2
-/*
- * struct nfp_meta_parsed - Record metadata parsed from packet
- *
- * Parsed NFP packet metadata are recorded in this struct. The content is
- * read-only after it have been recorded during parsing by nfp_net_parse_meta().
- *
- * @port_id: Port id value
- * @sa_idx: IPsec SA index
- * @hash: RSS hash value
- * @hash_type: RSS hash type
- * @ipsec_type: IPsec type
- * @vlan_layer: The layers of VLAN info which are passed from nic.
- * Only this number of entries of the @vlan array are valid.
- *
- * @vlan: Holds information parses from NFP_NET_META_VLAN. The inner most vlan
- * starts at position 0 and only @vlan_layer entries contain valid
- * information.
- *
- * Currently only 2 layers of vlan are supported,
- * vlan[0] - vlan strip info
- * vlan[1] - qinq strip info
- *
- * @vlan.offload: Flag indicates whether VLAN is offloaded
- * @vlan.tpid: Vlan TPID
- * @vlan.tci: Vlan TCI including PCP + Priority + VID
- */
+/* Record metadata parsed from packet */
struct nfp_meta_parsed {
- uint32_t port_id;
- uint32_t sa_idx;
- uint32_t hash;
- uint8_t hash_type;
- uint8_t ipsec_type;
- uint8_t vlan_layer;
+ uint32_t port_id; /**< Port id value */
+ uint32_t sa_idx; /**< IPsec SA index */
+ uint32_t hash; /**< RSS hash value */
+ uint8_t hash_type; /**< RSS hash type */
+ uint8_t ipsec_type; /**< IPsec type */
+ uint8_t vlan_layer; /**< The number of valid entries in @vlan[] */
+ /**
+ * Holds information parsed from NFP_NET_META_VLAN.
+ * The inner most vlan starts at position 0
+ */
struct {
- uint8_t offload;
- uint8_t tpid;
- uint16_t tci;
+ uint8_t offload; /**< Flag indicates whether VLAN is offloaded */
+ uint8_t tpid; /**< Vlan TPID */
+ uint16_t tci; /**< Vlan TCI (PCP + Priority + VID) */
} vlan[NFP_META_MAX_VLANS];
};
@@ -156,7 +135,7 @@ struct nfp_ptype_parsed {
uint8_t outer_l3_ptype; /**< Packet type of outer layer 3. */
};
-/* set mbuf checksum flags based on RX descriptor flags */
+/* Set mbuf checksum flags based on RX descriptor flags */
void
nfp_net_rx_cksum(struct nfp_net_rxq *rxq,
struct nfp_net_rx_desc *rxd,
@@ -254,7 +233,7 @@ nfp_net_rx_queue_count(void *rx_queue)
* descriptors and counting all four if the first has the DD
* bit on. Of course, this is not accurate but can be good for
* performance. But ideally that should be done in descriptors
- * chunks belonging to the same cache line
+ * chunks belonging to the same cache line.
*/
while (count < rxq->rx_count) {
@@ -265,7 +244,7 @@ nfp_net_rx_queue_count(void *rx_queue)
count++;
idx++;
- /* Wrapping? */
+ /* Wrapping */
if ((idx) == rxq->rx_count)
idx = 0;
}
@@ -273,7 +252,7 @@ nfp_net_rx_queue_count(void *rx_queue)
return count;
}
-/* nfp_net_parse_chained_meta() - Parse the chained metadata from packet */
+/* Parse the chained metadata from packet */
static bool
nfp_net_parse_chained_meta(uint8_t *meta_base,
rte_be32_t meta_header,
@@ -320,12 +299,7 @@ nfp_net_parse_chained_meta(uint8_t *meta_base,
return true;
}
-/*
- * nfp_net_parse_meta_hash() - Set mbuf hash data based on the metadata info
- *
- * The RSS hash and hash-type are prepended to the packet data.
- * Extract and decode it and set the mbuf fields.
- */
+/* Set mbuf hash data based on the metadata info */
static void
nfp_net_parse_meta_hash(const struct nfp_meta_parsed *meta,
struct nfp_net_rxq *rxq,
@@ -341,7 +315,7 @@ nfp_net_parse_meta_hash(const struct nfp_meta_parsed *meta,
}
/*
- * nfp_net_parse_single_meta() - Parse the single metadata
+ * Parse the single metadata
*
* The RSS hash and hash-type are prepended to the packet data.
* Get it from metadata area.
@@ -355,12 +329,7 @@ nfp_net_parse_single_meta(uint8_t *meta_base,
meta->hash = rte_be_to_cpu_32(*(rte_be32_t *)(meta_base + 4));
}
-/*
- * nfp_net_parse_meta_vlan() - Set mbuf vlan_strip data based on metadata info
- *
- * The VLAN info TPID and TCI are prepended to the packet data.
- * Extract and decode it and set the mbuf fields.
- */
+/* Set mbuf vlan_strip data based on metadata info */
static void
nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta,
struct nfp_net_rx_desc *rxd,
@@ -369,19 +338,14 @@ nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta,
{
struct nfp_net_hw *hw = rxq->hw;
- /* Skip if hardware don't support setting vlan. */
+ /* Skip if firmware doesn't support setting vlan. */
if ((hw->ctrl & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) == 0)
return;
/*
- * The nic support the two way to send the VLAN info,
- * 1. According the metadata to send the VLAN info when NFP_NET_CFG_CTRL_RXVLAN_V2
- * is set
- * 2. According the descriptor to sned the VLAN info when NFP_NET_CFG_CTRL_RXVLAN
- * is set
- *
- * If the nic doesn't send the VLAN info, it is not necessary
- * to do anything.
+ * The firmware supports two ways to send the VLAN info (with priority):
+ * 1. Using the metadata when NFP_NET_CFG_CTRL_RXVLAN_V2 is set,
+ * 2. Using the descriptor when NFP_NET_CFG_CTRL_RXVLAN is set.
*/
if ((hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0) {
if (meta->vlan_layer > 0 && meta->vlan[0].offload != 0) {
@@ -397,7 +361,7 @@ nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta,
}
/*
- * nfp_net_parse_meta_qinq() - Set mbuf qinq_strip data based on metadata info
+ * Set mbuf qinq_strip data based on metadata info
*
* The out VLAN tci are prepended to the packet data.
* Extract and decode it and set the mbuf fields.
@@ -469,7 +433,7 @@ nfp_net_parse_meta_ipsec(struct nfp_meta_parsed *meta,
}
}
-/* nfp_net_parse_meta() - Parse the metadata from packet */
+/* Parse the metadata from packet */
static void
nfp_net_parse_meta(struct nfp_net_rx_desc *rxds,
struct nfp_net_rxq *rxq,
@@ -672,7 +636,7 @@ nfp_net_parse_ptype(struct nfp_net_rx_desc *rxds,
* doing now have any benefit at all. Again, tests with this change have not
* shown any improvement. Also, rte_mempool_get_bulk returns all or nothing
* so looking at the implications of this type of allocation should be studied
- * deeply
+ * deeply.
*/
uint16_t
@@ -695,7 +659,7 @@ nfp_net_recv_pkts(void *rx_queue,
if (unlikely(rxq == NULL)) {
/*
* DPDK just checks the queue is lower than max queues
- * enabled. But the queue needs to be configured
+ * enabled. But the queue needs to be configured.
*/
PMD_RX_LOG(ERR, "RX Bad queue");
return 0;
@@ -722,7 +686,7 @@ nfp_net_recv_pkts(void *rx_queue,
/*
* We got a packet. Let's alloc a new mbuf for refilling the
- * free descriptor ring as soon as possible
+ * free descriptor ring as soon as possible.
*/
new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
if (unlikely(new_mb == NULL)) {
@@ -734,7 +698,7 @@ nfp_net_recv_pkts(void *rx_queue,
/*
* Grab the mbuf and refill the descriptor with the
- * previously allocated mbuf
+ * previously allocated mbuf.
*/
mb = rxb->mbuf;
rxb->mbuf = new_mb;
@@ -751,7 +715,7 @@ nfp_net_recv_pkts(void *rx_queue,
/*
* This should not happen and the user has the
* responsibility of avoiding it. But we have
- * to give some info about the error
+ * to give some info about the error.
*/
PMD_RX_LOG(ERR, "mbuf overflow likely due to the RX offset.\n"
"\t\tYour mbuf size should have extra space for"
@@ -803,7 +767,7 @@ nfp_net_recv_pkts(void *rx_queue,
nb_hold++;
rxq->rd_p++;
- if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
+ if (unlikely(rxq->rd_p == rxq->rx_count)) /* Wrapping */
rxq->rd_p = 0;
}
@@ -817,7 +781,7 @@ nfp_net_recv_pkts(void *rx_queue,
/*
* FL descriptors needs to be written before incrementing the
- * FL queue WR pointer
+ * FL queue WR pointer.
*/
rte_wmb();
if (nb_hold > rxq->rx_free_thresh) {
@@ -898,7 +862,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Free memory prior to re-allocation if needed. This is the case after
- * calling nfp_net_stop
+ * calling @nfp_net_stop().
*/
if (dev->data->rx_queues[queue_idx] != NULL) {
nfp_net_rx_queue_release(dev, queue_idx);
@@ -920,7 +884,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Tracking mbuf size for detecting a potential mbuf overflow due to
- * RX offset
+ * RX offset.
*/
rxq->mem_pool = mp;
rxq->mbuf_size = rxq->mem_pool->elt_size;
@@ -951,7 +915,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
rxq->dma = (uint64_t)tz->iova;
rxq->rxds = tz->addr;
- /* mbuf pointers array for referencing mbufs linked to RX descriptors */
+ /* Mbuf pointers array for referencing mbufs linked to RX descriptors */
rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
sizeof(*rxq->rxbufs) * nb_desc, RTE_CACHE_LINE_SIZE,
socket_id);
@@ -967,7 +931,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Telling the HW about the physical address of the RX ring and number
- * of descriptors in log2 format
+ * of descriptors in log2 format.
*/
nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
@@ -975,11 +939,14 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
-/*
- * nfp_net_tx_free_bufs - Check for descriptors with a complete
- * status
- * @txq: TX queue to work with
- * Returns number of descriptors freed
+/**
+ * Check for descriptors with a complete status
+ *
+ * @param txq
+ * TX queue to work with
+ *
+ * @return
+ * Number of descriptors freed
*/
uint32_t
nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
diff --git a/drivers/net/nfp/nfp_rxtx.h b/drivers/net/nfp/nfp_rxtx.h
index 98ef6c3d93..899cc42c97 100644
--- a/drivers/net/nfp/nfp_rxtx.h
+++ b/drivers/net/nfp/nfp_rxtx.h
@@ -19,21 +19,11 @@
/* Maximum number of NFP packet metadata fields. */
#define NFP_META_MAX_FIELDS 8
-/*
- * struct nfp_net_meta_raw - Raw memory representation of packet metadata
- *
- * Describe the raw metadata format, useful when preparing metadata for a
- * transmission mbuf.
- *
- * @header: NFD3 or NFDk field type header (see format in nfp.rst)
- * @data: Array of each fields data member
- * @length: Keep track of number of valid fields in @header and data. Not part
- * of the raw metadata.
- */
+/* Describe the raw metadata format. */
struct nfp_net_meta_raw {
- uint32_t header;
- uint32_t data[NFP_META_MAX_FIELDS];
- uint8_t length;
+ uint32_t header; /**< Field type header (see format in nfp.rst) */
+ uint32_t data[NFP_META_MAX_FIELDS]; /**< Array of each fields data member */
+ uint8_t length; /**< Number of valid fields in @header and @data */
};
/* Descriptor alignment */
--
2.39.1
next prev parent reply other threads:[~2023-10-12 1:28 UTC|newest]
Thread overview: 40+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-10-07 2:33 [PATCH 00/11] Unify the PMD coding style Chaoyong He
2023-10-07 2:33 ` [PATCH 01/11] net/nfp: explicitly compare to null and 0 Chaoyong He
2023-10-07 2:33 ` [PATCH 02/11] net/nfp: unify the indent coding style Chaoyong He
2023-10-07 2:33 ` [PATCH 03/11] net/nfp: unify the type of integer variable Chaoyong He
2023-10-07 2:33 ` [PATCH 04/11] net/nfp: standard the local variable coding style Chaoyong He
2023-10-07 2:33 ` [PATCH 05/11] net/nfp: adjust the log statement Chaoyong He
2023-10-07 2:33 ` [PATCH 06/11] net/nfp: standard the comment style Chaoyong He
2023-10-07 2:33 ` [PATCH 07/11] net/nfp: standard the blank character Chaoyong He
2023-10-07 2:33 ` [PATCH 08/11] net/nfp: unify the guide line of header file Chaoyong He
2023-10-07 2:33 ` [PATCH 09/11] net/nfp: rename some parameter and variable Chaoyong He
2023-10-07 2:33 ` [PATCH 10/11] net/nfp: adjust logic to make it more readable Chaoyong He
2023-10-07 2:33 ` [PATCH 11/11] net/nfp: refact the meson build file Chaoyong He
2023-10-12 1:26 ` [PATCH v2 00/11] Unify the PMD coding style Chaoyong He
2023-10-12 1:26 ` [PATCH v2 01/11] net/nfp: explicitly compare to null and 0 Chaoyong He
2023-10-12 1:26 ` [PATCH v2 02/11] net/nfp: unify the indent coding style Chaoyong He
2023-10-12 1:26 ` [PATCH v2 03/11] net/nfp: unify the type of integer variable Chaoyong He
2023-10-12 1:26 ` [PATCH v2 04/11] net/nfp: standard the local variable coding style Chaoyong He
2023-10-12 1:26 ` [PATCH v2 05/11] net/nfp: adjust the log statement Chaoyong He
2023-10-12 1:38 ` Stephen Hemminger
2023-10-12 1:40 ` Chaoyong He
2023-10-12 1:26 ` Chaoyong He [this message]
2023-10-12 1:27 ` [PATCH v2 07/11] net/nfp: standard the blank character Chaoyong He
2023-10-12 1:27 ` [PATCH v2 08/11] net/nfp: unify the guide line of header file Chaoyong He
2023-10-12 1:27 ` [PATCH v2 09/11] net/nfp: rename some parameter and variable Chaoyong He
2023-10-12 1:27 ` [PATCH v2 10/11] net/nfp: adjust logic to make it more readable Chaoyong He
2023-10-12 1:27 ` [PATCH v2 11/11] net/nfp: refact the meson build file Chaoyong He
2023-10-13 6:06 ` [PATCH v3 00/11] Unify the PMD coding style Chaoyong He
2023-10-13 6:06 ` [PATCH v3 01/11] net/nfp: explicitly compare to null and 0 Chaoyong He
2023-10-13 6:06 ` [PATCH v3 02/11] net/nfp: unify the indent coding style Chaoyong He
2023-10-13 6:06 ` [PATCH v3 03/11] net/nfp: unify the type of integer variable Chaoyong He
2023-10-13 6:06 ` [PATCH v3 04/11] net/nfp: standard the local variable coding style Chaoyong He
2023-10-13 6:06 ` [PATCH v3 05/11] net/nfp: adjust the log statement Chaoyong He
2023-10-13 6:06 ` [PATCH v3 06/11] net/nfp: standard the comment style Chaoyong He
2023-10-13 6:06 ` [PATCH v3 07/11] net/nfp: standard the blank character Chaoyong He
2023-10-13 6:06 ` [PATCH v3 08/11] net/nfp: unify the guide line of header file Chaoyong He
2023-10-13 6:06 ` [PATCH v3 09/11] net/nfp: rename some parameter and variable Chaoyong He
2023-10-13 6:06 ` [PATCH v3 10/11] net/nfp: adjust logic to make it more readable Chaoyong He
2023-10-13 6:06 ` [PATCH v3 11/11] net/nfp: refact the meson build file Chaoyong He
2023-10-16 16:50 ` Ferruh Yigit
2023-10-16 16:50 ` [PATCH v3 00/11] Unify the PMD coding style Ferruh Yigit
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231012012704.483828-7-chaoyong.he@corigine.com \
--to=chaoyong.he@corigine.com \
--cc=dev@dpdk.org \
--cc=long.wu@corigine.com \
--cc=oss-drivers@corigine.com \
--cc=peng.zhang@corigine.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).