DPDK patches and discussions
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
To: dev@dpdk.org
Cc: Randy Schacher <stuart.schacher@broadcom.com>
Subject: [dpdk-dev] [PATCH v3 02/34] net/bnxt: update hwrm prep to use ptr
Date: Tue, 14 Apr 2020 13:42:59 +0530	[thread overview]
Message-ID: <1586852011-37536-3-git-send-email-venkatkumar.duvvuru@broadcom.com> (raw)
In-Reply-To: <1586852011-37536-1-git-send-email-venkatkumar.duvvuru@broadcom.com>

From: Randy Schacher <stuart.schacher@broadcom.com>

- Change HWRM_PREP to take a pointer to the request structure and to
  use the full HWRM enum value for the request type, instead of
  token-pasting a shortened name onto the HWRM_ prefix
- Rename the ChiMP mailbox sequence counter from hwrm_cmd_seq to
  chimp_cmd_seq to distinguish it from kong_cmd_seq
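
For example, a typical call site changes from passing the request by
value with a token-pasted type to passing a pointer and the full HWRM
enum, as in the hunks below:

    /* before: request passed by value, type expanded as HWRM_##type */
    HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);

    /* after: request passed by pointer, full HWRM enum used directly */
    HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);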

Signed-off-by: Randy Schacher <stuart.schacher@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt.h      |   2 +-
 drivers/net/bnxt/bnxt_hwrm.c | 202 ++++++++++++++++++++++---------------------
 2 files changed, 103 insertions(+), 101 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 3ae08a2..b795ed6 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -594,7 +594,7 @@ struct bnxt {
 
 	uint8_t			mac_addr[RTE_ETHER_ADDR_LEN];
 
-	uint16_t			hwrm_cmd_seq;
+	uint16_t			chimp_cmd_seq;
 	uint16_t			kong_cmd_seq;
 	void				*hwrm_cmd_resp_addr;
 	rte_iova_t			hwrm_cmd_resp_dma_addr;
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index a9c9c72..93b2ea7 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -182,19 +182,19 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
  *
  * HWRM_UNLOCK() must be called after all response processing is completed.
  */
-#define HWRM_PREP(req, type, kong) do { \
+#define HWRM_PREP(req, type, kong) do {	\
 	rte_spinlock_lock(&bp->hwrm_lock); \
 	if (bp->hwrm_cmd_resp_addr == NULL) { \
 		rte_spinlock_unlock(&bp->hwrm_lock); \
 		return -EACCES; \
 	} \
 	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
-	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
-	req.cmpl_ring = rte_cpu_to_le_16(-1); \
-	req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
-		rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
-	req.target_id = rte_cpu_to_le_16(0xffff); \
-	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
+	(req)->req_type = rte_cpu_to_le_16(type); \
+	(req)->cmpl_ring = rte_cpu_to_le_16(-1); \
+	(req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
+		rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
+	(req)->target_id = rte_cpu_to_le_16(0xffff); \
+	(req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
 } while (0)
 
 #define HWRM_CHECK_RESULT_SILENT() do {\
@@ -263,7 +263,7 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 	req.mask = 0;
 
@@ -288,7 +288,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
 		return rc;
 
-	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
 	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
@@ -347,7 +347,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 				return 0;
 		}
 	}
-	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
 	req.fid = rte_cpu_to_le_16(fid);
 
 	req.vlan_tag_mask_tbl_addr =
@@ -389,7 +389,7 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
 	if (l2_filter->l2_ref_cnt > 0)
 		return 0;
 
-	HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
 
 	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
 
@@ -440,7 +440,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 	if (filter->fw_l2_filter_id != UINT64_MAX)
 		bnxt_hwrm_clear_l2_filter(bp, filter);
 
-	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
 
 	req.flags = rte_cpu_to_le_32(filter->flags);
 
@@ -503,7 +503,7 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
 	if (!ptp)
 		return 0;
 
-	HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
 
 	if (ptp->rx_filter)
 		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
@@ -536,7 +536,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 	if (ptp)
 		return 0;
 
-	HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
 
 	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
 
@@ -591,7 +591,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	uint32_t flags;
 	int i;
 
-	HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(0xffff);
 
@@ -721,7 +721,7 @@ int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 	struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
 	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);
 
 	req.target_id = rte_cpu_to_le_16(0xffff);
 
@@ -748,7 +748,7 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
 	struct hwrm_func_reset_input req = {.req_type = 0 };
 	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);
 
 	req.enables = rte_cpu_to_le_32(0);
 
@@ -781,7 +781,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 	if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
 
-	HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
 			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
 	req.ver_maj = RTE_VER_YEAR;
@@ -853,7 +853,7 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	struct hwrm_func_vf_cfg_input req = {0};
 
-	HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
 
 	enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
@@ -919,7 +919,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
 	struct hwrm_func_resource_qcaps_input req = {0};
 
-	HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
 	req.fid = rte_cpu_to_le_16(0xffff);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -964,7 +964,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
 
 	bp->max_req_len = HWRM_MAX_REQ_LEN;
 	bp->hwrm_cmd_timeout = timeout;
-	HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
 
 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
@@ -1104,7 +1104,7 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
 	if (!(bp->flags & BNXT_FLAG_REGISTERED))
 		return 0;
 
-	HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
 	req.flags = flags;
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -1122,7 +1122,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
 	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	uint32_t enables = 0;
 
-	HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
 
 	if (conf->link_up) {
 		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
@@ -1186,7 +1186,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
 	struct hwrm_port_phy_qcfg_input req = {0};
 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -1265,7 +1265,7 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 	int i;
 
 get_rx_info:
-	HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
 
 	req.flags = rte_cpu_to_le_32(dir);
 	/* HWRM Version >= 1.9.1 only if COS Classification is not required. */
@@ -1353,7 +1353,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 	struct rte_mempool *mb_pool;
 	uint16_t rx_buf_size;
 
-	HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
 
 	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
 	req.fbo = rte_cpu_to_le_32(0);
@@ -1477,7 +1477,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
 	struct hwrm_ring_free_input req = {.req_type = 0 };
 	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
 
 	req.ring_type = ring_type;
 	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
@@ -1525,7 +1525,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
 	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
 	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
 
 	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
 	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
@@ -1549,7 +1549,7 @@ int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
 	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
 	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
 
 	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
 
@@ -1571,7 +1571,7 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
 		return rc;
 
-	HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
 
 	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
 
@@ -1590,7 +1590,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
 	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
 
 	req.update_period_ms = rte_cpu_to_le_32(0);
 
@@ -1614,7 +1614,7 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
 	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
 
 	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
 
@@ -1648,7 +1648,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 
 skip_ring_grps:
 	vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
-	HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
 
 	if (vnic->func_default)
 		req.flags =
@@ -1671,7 +1671,7 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
 	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
 
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
@@ -1704,7 +1704,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
 		return rc;
 	}
 
-	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
 
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 	req.flags = rte_cpu_to_le_32(pmode->flags);
@@ -1743,7 +1743,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	if (rc)
 		return rc;
 
-	HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
 
 	if (BNXT_CHIP_THOR(bp)) {
 		int dflt_rxq = vnic->start_grp_id;
@@ -1847,7 +1847,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
 		return rc;
 	}
-	HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
 
 	req.enables =
 		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
@@ -1890,7 +1890,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
 						bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
@@ -1919,7 +1919,7 @@ int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
 		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
 		return rc;
 	}
-	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
 
 	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
 
@@ -1964,7 +1964,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 		return rc;
 	}
 
-	HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
 
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
@@ -1991,7 +1991,7 @@ bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
 	for (i = 0; i < nr_ctxs; i++) {
-		HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
 
 		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
@@ -2029,7 +2029,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 	if (BNXT_CHIP_THOR(bp))
 		return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
 
-	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
 
 	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
 	req.hash_mode_flags = vnic->hash_mode;
@@ -2062,7 +2062,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
 		return rc;
 	}
 
-	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
 
 	req.flags = rte_cpu_to_le_32(
 			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
@@ -2103,7 +2103,7 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
 		return 0;
 	}
 
-	HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
 
 	if (enable) {
 		req.enables = rte_cpu_to_le_32(
@@ -2143,7 +2143,7 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
 	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 
-	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
@@ -2161,7 +2161,7 @@ int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
 	struct hwrm_func_qstats_input req = {.req_type = 0};
 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(fid);
 
@@ -2184,7 +2184,7 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
 	struct hwrm_func_qstats_input req = {.req_type = 0};
 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(fid);
 
@@ -2221,7 +2221,7 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
 	struct hwrm_func_clr_stats_input req = {.req_type = 0};
 	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(fid);
 
@@ -2928,7 +2928,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 	uint16_t flags;
 	int rc = 0;
 
-	HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
 	req.fid = rte_cpu_to_le_16(0xffff);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -3037,7 +3037,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.enables = rte_cpu_to_le_32(enables);
 
-	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -3109,7 +3109,7 @@ static int reserve_resources_from_vf(struct bnxt *bp,
 	int rc;
 
 	/* Get the actual allocated values now */
-	HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -3147,7 +3147,7 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
 	int rc;
 
 	/* Check for zero MAC address */
-	HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
@@ -3165,7 +3165,7 @@ static int update_pf_resource_max(struct bnxt *bp)
 	int rc;
 
 	/* And copy the allocated numbers into the pf struct */
-	HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
 	req.fid = rte_cpu_to_le_16(0xffff);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
@@ -3268,7 +3268,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 	for (i = 0; i < num_vfs; i++) {
 		add_random_mac_if_needed(bp, &req, i);
 
-		HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+		HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
 		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
 		rc = bnxt_hwrm_send_message(bp,
@@ -3324,7 +3324,7 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
@@ -3344,7 +3344,7 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
 	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
 	req.tunnel_type = tunnel_type;
 	req.tunnel_dst_port_val = port;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -3375,7 +3375,7 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
 	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
 
 	req.tunnel_type = tunnel_type;
 	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
@@ -3394,7 +3394,7 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	req.flags = rte_cpu_to_le_32(flags);
@@ -3424,7 +3424,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
 	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
 
 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
 	req.req_buf_page_size = rte_cpu_to_le_16(
@@ -3455,7 +3455,7 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
 	if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
 		return 0;
 
-	HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -3471,7 +3471,7 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
 	struct hwrm_func_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
@@ -3493,7 +3493,7 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
 	struct hwrm_func_vf_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
 
 	req.enables = rte_cpu_to_le_32(
 			HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
@@ -3515,7 +3515,7 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
 	uint32_t func_cfg_flags;
 	int rc = 0;
 
-	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
 	if (is_vf) {
 		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
@@ -3547,7 +3547,7 @@ int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	req.enables |= rte_cpu_to_le_32(enables);
@@ -3567,7 +3567,7 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
 	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
@@ -3604,7 +3604,7 @@ int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
 	if (ec_size > sizeof(req.encap_request))
 		return -1;
 
-	HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
 
 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
 	memcpy(req.encap_request, encaped, ec_size);
@@ -3624,7 +3624,7 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc;
 
-	HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -3648,7 +3648,7 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
 	if (ec_size > sizeof(req.encap_request))
 		return -1;
 
-	HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
 
 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
 	memcpy(req.encap_request, encaped, ec_size);
@@ -3668,7 +3668,7 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
 	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
 	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
 
 	req.stat_ctx_id = rte_cpu_to_le_32(cid);
 
@@ -3706,7 +3706,7 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp)
 	struct bnxt_pf_info *pf = &bp->pf;
 	int rc;
 
-	HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
 
 	req.port_id = rte_cpu_to_le_16(pf->port_id);
 	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
@@ -3731,7 +3731,7 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
 	    BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
 		return 0;
 
-	HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
 
 	req.port_id = rte_cpu_to_le_16(pf->port_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -3751,7 +3751,7 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
 	if (BNXT_VF(bp))
 		return 0;
 
-	HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
 	req.port_id = bp->pf.port_id;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -3793,7 +3793,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
 	if (!bp->num_leds || BNXT_VF(bp))
 		return -EOPNOTSUPP;
 
-	HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
 
 	if (led_on) {
 		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
@@ -3826,7 +3826,7 @@ int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
 	struct hwrm_nvm_get_dir_info_input req = {0};
 	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -3869,7 +3869,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
 			"unable to map response address to physical memory\n");
 		return -ENOMEM;
 	}
-	HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
 	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -3903,7 +3903,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
 			"unable to map response address to physical memory\n");
 		return -ENOMEM;
 	}
-	HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
 	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
 	req.dir_idx = rte_cpu_to_le_16(index);
 	req.offset = rte_cpu_to_le_32(offset);
@@ -3925,7 +3925,7 @@ int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
 	struct hwrm_nvm_erase_dir_entry_input req = {0};
 	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
 	req.dir_idx = rte_cpu_to_le_16(index);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
@@ -3958,7 +3958,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
 	}
 	memcpy(buf, data, data_len);
 
-	HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
 
 	req.dir_type = rte_cpu_to_le_16(dir_type);
 	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
@@ -4009,7 +4009,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 	int rc;
 
 	/* First query all VNIC ids */
-	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
 
 	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
 	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
@@ -4091,7 +4091,7 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	req.enables |= rte_cpu_to_le_32(
@@ -4166,7 +4166,7 @@ int bnxt_hwrm_set_em_filter(struct bnxt *bp,
 	if (filter->fw_em_filter_id != UINT64_MAX)
 		bnxt_hwrm_clear_em_filter(bp, filter);
 
-	HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
+	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
 
 	req.flags = rte_cpu_to_le_32(filter->flags);
 
@@ -4238,7 +4238,7 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
 	if (filter->fw_em_filter_id == UINT64_MAX)
 		return 0;
 
-	HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
+	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
 
 	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
 
@@ -4266,7 +4266,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
 	if (filter->fw_ntuple_filter_id != UINT64_MAX)
 		bnxt_hwrm_clear_ntuple_filter(bp, filter);
 
-	HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
 
 	req.flags = rte_cpu_to_le_32(filter->flags);
 
@@ -4346,7 +4346,7 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
 	if (filter->fw_ntuple_filter_id == UINT64_MAX)
 		return 0;
 
-	HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
 
 	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
 
@@ -4377,7 +4377,7 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 		struct bnxt_rx_ring_info *rxr;
 		struct bnxt_cp_ring_info *cpr;
 
-		HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
 
 		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
@@ -4509,7 +4509,7 @@ static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
 	uint16_t flags;
 	int rc;
 
-	HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 
@@ -4546,7 +4546,9 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
 		return 0;
 	}
 
-	HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req,
+		  HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+		  BNXT_USE_CHIMP_MB);
 	req.ring_id = rte_cpu_to_le_16(ring_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
@@ -4571,7 +4573,7 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 	    bp->ctx)
 		return 0;
 
-	HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT_SILENT();
 
@@ -4650,7 +4652,7 @@ int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
 	if (!ctx)
 		return 0;
 
-	HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
 	req.enables = rte_cpu_to_le_32(enables);
 
 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
@@ -4743,7 +4745,7 @@ int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
 	      bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
 		return 0;
 
-	HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
 
 	req.port_id = rte_cpu_to_le_16(pf->port_id);
 	if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
@@ -4784,7 +4786,7 @@ bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
 		bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
 	req.tunnel_type = type;
 	req.dest_fid = bp->fw_fid;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -4803,7 +4805,7 @@ bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
 		bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
 	req.tunnel_type = type;
 	req.dest_fid = bp->fw_fid;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -4821,7 +4823,7 @@ int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
 		bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
 	req.src_fid = bp->fw_fid;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
@@ -4842,7 +4844,7 @@ int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
 		bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
 	req.src_fid = bp->fw_fid;
 	req.tunnel_type = tun_type;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -4867,7 +4869,7 @@ int bnxt_hwrm_set_mac(struct bnxt *bp)
 	if (!BNXT_VF(bp))
 		return 0;
 
-	HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
 
 	req.enables =
 		rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
@@ -4900,7 +4902,7 @@ int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 	if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
 		return 0;
 
-	HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
 
 	if (up)
 		req.flags =
@@ -4946,7 +4948,7 @@ int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
 		memset(info, 0, sizeof(*info));
 	}
 
-	HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -5022,7 +5024,7 @@ int bnxt_hwrm_fw_reset(struct bnxt *bp)
 	if (!BNXT_PF(bp))
 		return -EOPNOTSUPP;
 
-	HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp));
+	HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
 
 	req.embedded_proc_type =
 		HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
@@ -5050,7 +5052,7 @@ int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
 	if (!ptp)
 		return 0;
 
-	HWRM_PREP(req, PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
 
 	switch (path) {
 	case BNXT_PTP_FLAGS_PATH_TX:
@@ -5098,7 +5100,7 @@ int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
 		return 0;
 	}
 
-	HWRM_PREP(req, CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp));
+	HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp));
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
 
 	HWRM_CHECK_RESULT();
-- 
2.7.4

