From: Ajit Khaparde <ajit.khaparde@broadcom.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, thomas@monjalon.net,
Randy Schacher <stuart.schacher@broadcom.com>,
Kishore Padmanabha <kishore.padmanabha@broadcom.com>,
Mike Baucom <michael.baucom@broadcom.com>,
Shuanglin Wang <shuanglin.wang@broadcom.com>
Subject: [PATCH v4 06/11] net/bnxt: add RSS and Queue action in TruFlow
Date: Wed, 28 Jun 2023 09:29:22 -0700
Message-ID: <20230628162927.92858-7-ajit.khaparde@broadcom.com>
In-Reply-To: <20230628162927.92858-1-ajit.khaparde@broadcom.com>
From: Randy Schacher <stuart.schacher@broadcom.com>
- Update ULP layer to support RSS/Queue action
- Modify VNIC handling driver to support RSS action
- Modify VNIC handling driver to support Queue action
This should allow the TruFlow path to be enabled for all RTE_FLOW
operations by default in the future.
Signed-off-by: Randy Schacher <stuart.schacher@broadcom.com>
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Shuanglin Wang <shuanglin.wang@broadcom.com>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
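Reviewer note (below the "---" so it stays out of the commit message): a
minimal sketch of the rte_flow usage this series targets, i.e. the RSS and
QUEUE actions that can now be offloaded through the TruFlow path. The port
id, queue ids and match pattern are illustrative assumptions, not values
taken from this patch.

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Spread matching IPv4 traffic across a set of Rx queues with the RSS
 * action. For the QUEUE action, use RTE_FLOW_ACTION_TYPE_QUEUE with a
 * struct rte_flow_action_queue conf instead.
 */
static struct rte_flow *
rss_action_flow(uint16_t port_id)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.types = RTE_ETH_RSS_IPV4,	/* hash on IPv4 addresses */
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}

The equivalent testpmd command would be along the lines of:
  flow create 0 ingress pattern eth / ipv4 / end actions rss queues 0 1 2 3 end / end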
doc/guides/nics/features/bnxt.ini | 3 +
drivers/net/bnxt/bnxt.h | 39 +-
drivers/net/bnxt/bnxt_ethdev.c | 154 +--
drivers/net/bnxt/bnxt_filter.h | 6 +-
drivers/net/bnxt/bnxt_flow.c | 75 +-
drivers/net/bnxt/bnxt_hwrm.c | 187 +++-
drivers/net/bnxt/bnxt_hwrm.h | 30 +-
drivers/net/bnxt/bnxt_ring.c | 4 +-
drivers/net/bnxt/bnxt_rxq.c | 159 +--
drivers/net/bnxt/bnxt_rxr.c | 9 +-
drivers/net/bnxt/bnxt_txq.c | 2 +-
drivers/net/bnxt/bnxt_txr.c | 2 +-
drivers/net/bnxt/bnxt_txr.h | 2 +-
drivers/net/bnxt/bnxt_vnic.c | 969 +++++++++++++++++-
drivers/net/bnxt/bnxt_vnic.h | 80 +-
drivers/net/bnxt/meson.build | 3 +-
drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c | 248 ++++-
drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.h | 35 +-
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 258 ++++-
drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 1 +
drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c | 280 ++++-
drivers/net/bnxt/tf_ulp/meson.build | 25 +-
drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c | 13 +-
drivers/net/bnxt/tf_ulp/ulp_gen_tbl.c | 37 +-
drivers/net/bnxt/tf_ulp/ulp_gen_tbl.h | 4 +-
drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c | 121 ++-
drivers/net/bnxt/tf_ulp/ulp_ha_mgr.h | 5 +-
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 449 +++++++-
drivers/net/bnxt/tf_ulp/ulp_mapper.h | 3 +-
drivers/net/bnxt/tf_ulp/ulp_matcher.c | 14 +-
drivers/net/bnxt/tf_ulp/ulp_port_db.c | 58 ++
drivers/net/bnxt/tf_ulp/ulp_port_db.h | 26 +
drivers/net/bnxt/tf_ulp/ulp_rte_handler_tbl.c | 22 +-
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 338 +++++-
drivers/net/bnxt/tf_ulp/ulp_rte_parser.h | 37 +-
35 files changed, 3281 insertions(+), 417 deletions(-)
diff --git a/doc/guides/nics/features/bnxt.ini b/doc/guides/nics/features/bnxt.ini
index 50a0b5bfa6..b225878a78 100644
--- a/doc/guides/nics/features/bnxt.ini
+++ b/doc/guides/nics/features/bnxt.ini
@@ -84,11 +84,14 @@ of_set_vlan_vid = Y
pf = Y
port_id = Y
port_representor = Y
+queue = Y
represented_port = Y
rss = Y
sample = Y
set_ipv4_dst = Y
set_ipv4_src = Y
+set_mac_dst = Y
+set_mac_src = Y
set_tp_dst = Y
set_tp_src = Y
vf = Y
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index bb2e7fe003..6dd3c8b87c 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -24,6 +24,7 @@
#include "tf_core.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
+#include "bnxt_vnic.h"
/* Vendor ID */
#define PCI_VENDOR_ID_BROADCOM 0x14E4
@@ -163,6 +164,8 @@
#define BNXT_HWRM_CMD_TO_FORWARD(cmd) \
(bp->pf->vf_req_fwd[(cmd) / 32] |= (1 << ((cmd) % 32)))
+#define BNXT_NTOHS rte_be_to_cpu_16
+
struct bnxt_led_info {
uint8_t num_leds;
uint8_t led_id;
@@ -238,11 +241,11 @@ struct bnxt_parent_info {
struct bnxt_pf_info {
#define BNXT_FIRST_PF_FID 1
#define BNXT_MAX_VFS(bp) ((bp)->pf->max_vfs)
-#define BNXT_MAX_VF_REPS_WH 64
-#define BNXT_MAX_VF_REPS_TH 256
+#define BNXT_MAX_VF_REPS_P4 64
+#define BNXT_MAX_VF_REPS_P5 256
#define BNXT_MAX_VF_REPS(bp) \
- (BNXT_CHIP_P5(bp) ? BNXT_MAX_VF_REPS_TH : \
- BNXT_MAX_VF_REPS_WH)
+ (BNXT_CHIP_P5(bp) ? BNXT_MAX_VF_REPS_P5 : \
+ BNXT_MAX_VF_REPS_P4)
#define BNXT_TOTAL_VFS(bp) ((bp)->pf->total_vfs)
#define BNXT_FIRST_VF_FID 128
#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp)
@@ -366,7 +369,7 @@ struct bnxt_ptp_cfg {
uint32_t tx_regs[BNXT_PTP_TX_REGS];
uint32_t tx_mapped_regs[BNXT_PTP_TX_REGS];
- /* On Thor, the Rx timestamp is present in the Rx completion record */
+ /* On P5, the Rx timestamp is present in the Rx completion record */
uint64_t rx_timestamp;
uint64_t current_time;
};
@@ -679,8 +682,8 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR(bp) ((bp)->flags & BNXT_FLAG_NPAR_PF)
-#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
-#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
+#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
+#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
#define BNXT_USE_CHIMP_MB 0 //For non-CFA commands, everything uses Chimp.
#define BNXT_USE_KONG(bp) ((bp)->flags & BNXT_FLAG_KONG_MB_EN)
#define BNXT_VF_IS_TRUSTED(bp) ((bp)->flags & BNXT_FLAG_TRUSTED_VF_EN)
@@ -689,7 +692,7 @@ struct bnxt {
#define BNXT_HAS_NQ(bp) BNXT_CHIP_P5(bp)
#define BNXT_HAS_RING_GRPS(bp) (!BNXT_CHIP_P5(bp))
#define BNXT_FLOW_XSTATS_EN(bp) ((bp)->flags & BNXT_FLAG_FLOW_XSTATS_EN)
-#define BNXT_HAS_DFLT_MAC_SET(bp) ((bp)->flags & BNXT_FLAG_DFLT_MAC_SET)
+#define BNXT_HAS_DFLT_MAC_SET(bp) ((bp)->flags & BNXT_FLAG_DFLT_MAC_SET)
#define BNXT_GFID_ENABLED(bp) ((bp)->flags & BNXT_FLAG_GFID_ENABLE)
uint32_t flags2;
@@ -697,8 +700,8 @@ struct bnxt {
#define BNXT_FLAGS2_PTP_ALARM_SCHEDULED BIT(1)
#define BNXT_P5_PTP_TIMESYNC_ENABLED(bp) \
((bp)->flags2 & BNXT_FLAGS2_PTP_TIMESYNC_ENABLED)
-#define BNXT_FLAGS2_TESTPMD_EN BIT(3)
-#define BNXT_TESTPMD_EN(bp) \
+#define BNXT_FLAGS2_TESTPMD_EN BIT(3)
+#define BNXT_TESTPMD_EN(bp) \
((bp)->flags2 & BNXT_FLAGS2_TESTPMD_EN)
uint16_t chip_num;
@@ -719,7 +722,8 @@ struct bnxt {
#define BNXT_FW_CAP_LINK_ADMIN BIT(7)
#define BNXT_FW_CAP_TRUFLOW_EN BIT(8)
#define BNXT_FW_CAP_VLAN_TX_INSERT BIT(9)
-#define BNXT_TRUFLOW_EN(bp) ((bp)->fw_cap & BNXT_FW_CAP_TRUFLOW_EN)
+#define BNXT_TRUFLOW_EN(bp) ((bp)->fw_cap & BNXT_FW_CAP_TRUFLOW_EN &&\
+ (bp)->app_id != 0xFF)
pthread_mutex_t flow_lock;
@@ -729,6 +733,7 @@ struct bnxt {
#define BNXT_VNIC_CAP_RX_CMPL_V2 BIT(2)
#define BNXT_VNIC_CAP_VLAN_RX_STRIP BIT(3)
#define BNXT_RX_VLAN_STRIP_EN(bp) ((bp)->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
+#define BNXT_VNIC_CAP_OUTER_RSS_TRUSTED_VF BIT(4)
unsigned int rx_nr_rings;
unsigned int rx_cp_nr_rings;
unsigned int rx_num_qs_per_vnic;
@@ -758,7 +763,6 @@ struct bnxt {
uint16_t nr_vnics;
-#define BNXT_GET_DEFAULT_VNIC(bp) (&(bp)->vnic_info[0])
struct bnxt_vnic_info *vnic_info;
STAILQ_HEAD(, bnxt_vnic_info) free_vnic_list;
@@ -873,6 +877,7 @@ struct bnxt {
uint16_t tx_cfa_action;
struct bnxt_ring_stats *prev_rx_ring_stats;
struct bnxt_ring_stats *prev_tx_ring_stats;
+ struct bnxt_vnic_queue_db vnic_queue_db;
#define BNXT_MAX_MC_ADDRS ((bp)->max_mcast_addr)
struct rte_ether_addr *mcast_addr_list;
@@ -905,7 +910,7 @@ inline uint16_t bnxt_max_rings(struct bnxt *bp)
}
/*
- * RSS table size in Thor is 512.
+ * RSS table size in P5 is 512.
* Cap max Rx rings to the same value for RSS.
*/
if (BNXT_CHIP_P5(bp))
@@ -997,9 +1002,16 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp);
bool is_bnxt_supported(struct rte_eth_dev *dev);
bool bnxt_stratus_device(struct bnxt *bp);
void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
+uint16_t bnxt_rss_ctxts(const struct bnxt *bp);
uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp);
int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
int wait_to_complete);
+int
+bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
+int
+bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
extern const struct rte_flow_ops bnxt_flow_ops;
@@ -1053,5 +1065,6 @@ int bnxt_flow_ops_get_op(struct rte_eth_dev *dev,
int bnxt_dev_start_op(struct rte_eth_dev *eth_dev);
int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev);
void bnxt_handle_vf_cfg_change(void *arg);
+struct bnxt_vnic_info *bnxt_get_default_vnic(struct bnxt *bp);
struct tf *bnxt_get_tfp_session(struct bnxt *bp, enum bnxt_session_type type);
#endif
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index bcde44bb14..4d84aaee0c 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -202,7 +202,7 @@ int is_bnxt_in_error(struct bnxt *bp)
* High level utility functions
*/
-static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
+uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings,
BNXT_RSS_TBL_SIZE_P5);
@@ -421,6 +421,10 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
vnic_id, vnic, vnic->fw_grp_ids);
+ /* populate the fw group table */
+ bnxt_vnic_ring_grp_populate(bp, vnic);
+ bnxt_vnic_rules_init(vnic);
+
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc)
goto err_out;
@@ -429,7 +433,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
int j, nr_ctxs = bnxt_rss_ctxts(bp);
- /* RSS table size in Thor is 512.
+ /* RSS table size in P5 is 512.
* Cap max Rx rings to same value
*/
if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) {
@@ -479,9 +483,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
j, rxq->vnic, rxq->vnic->fw_grp_ids);
if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
- rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
- else
- vnic->rx_queue_cnt++;
+ vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
}
PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);
@@ -755,12 +757,18 @@ static int bnxt_start_nic(struct bnxt *bp)
else
bp->flags &= ~BNXT_FLAG_JUMBO;
- /* THOR does not support ring groups.
+ /* P5 does not support ring groups.
* But we will use the array to save RSS context IDs.
*/
if (BNXT_CHIP_P5(bp))
bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
+ rc = bnxt_vnic_queue_db_init(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "could not allocate vnic db\n");
+ goto err_out;
+ }
+
rc = bnxt_alloc_hwrm_rings(bp);
if (rc) {
PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
@@ -808,6 +816,9 @@ static int bnxt_start_nic(struct bnxt *bp)
}
}
+ /* setup the default vnic details */
+ bnxt_vnic_queue_db_update_dlft_vnic(bp);
+
/* VNIC configuration */
for (i = 0; i < bp->nr_vnics; i++) {
rc = bnxt_setup_one_vnic(bp, i);
@@ -901,6 +912,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp)
bnxt_free_all_hwrm_resources(bp);
bnxt_free_all_filters(bp);
bnxt_free_all_vnics(bp);
+ bnxt_vnic_queue_db_deinit(bp);
return 0;
}
@@ -1431,7 +1443,6 @@ static void bnxt_ptp_get_current_time(void *arg)
bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
&ptp->current_time);
-
rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
if (rc != 0) {
PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n");
@@ -1450,6 +1461,7 @@ static int bnxt_schedule_ptp_alarm(struct bnxt *bp)
bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
&ptp->current_time);
+
rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
return rc;
}
@@ -1891,7 +1903,7 @@ static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
if (bp->vnic_info == NULL)
return 0;
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
old_flags = vnic->flags;
vnic->flags |= BNXT_VNIC_INFO_PROMISC;
@@ -1920,7 +1932,7 @@ static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
if (bp->vnic_info == NULL)
return 0;
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
old_flags = vnic->flags;
vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
@@ -1949,7 +1961,7 @@ static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
if (bp->vnic_info == NULL)
return 0;
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
old_flags = vnic->flags;
vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
@@ -1978,7 +1990,7 @@ static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
if (bp->vnic_info == NULL)
return 0;
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
old_flags = vnic->flags;
vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
@@ -2026,7 +2038,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
{
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
- struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp);
uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
uint16_t idx, sft;
int i, rc;
@@ -2048,6 +2060,10 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
return -EINVAL;
}
+ if (bnxt_vnic_reta_config_update(bp, vnic, reta_conf, reta_size)) {
+ PMD_DRV_LOG(ERR, "Error in setting the reta config\n");
+ return -EINVAL;
+ }
for (i = 0; i < reta_size; i++) {
struct bnxt_rx_queue *rxq;
@@ -2058,11 +2074,6 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
continue;
rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
- if (!rxq) {
- PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
- return -EINVAL;
- }
-
if (BNXT_CHIP_P5(bp)) {
vnic->rss_table[i * 2] =
rxq->rx_ring->rx_ring_struct->fw_ring_id;
@@ -2073,7 +2084,6 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
}
}
-
rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
return rc;
}
@@ -2083,7 +2093,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
uint16_t reta_size)
{
struct bnxt *bp = eth_dev->data->dev_private;
- struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp);
uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
uint16_t idx, sft, i;
int rc;
@@ -2153,7 +2163,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
}
/* Update the default RSS VNIC(s) */
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
vnic->hash_mode =
bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
@@ -2189,7 +2199,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_conf *rss_conf)
{
struct bnxt *bp = eth_dev->data->dev_private;
- struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp);
int len, rc;
uint32_t hash_types;
@@ -2348,7 +2358,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
}
/* Add UDP tunneling port */
-static int
+int
bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *udp_tunnel)
{
@@ -2410,7 +2420,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
return rc;
}
-static int
+int
bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *udp_tunnel)
{
@@ -2474,7 +2484,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
int rc = 0;
uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
filter = STAILQ_FIRST(&vnic->filter);
while (filter) {
/* Search for this matching MAC+VLAN filter */
@@ -2513,7 +2523,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
* then the HWRM shall only create an l2 context id.
*/
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
filter = STAILQ_FIRST(&vnic->filter);
/* Check if the VLAN has already been added */
while (filter) {
@@ -2618,7 +2628,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
unsigned int i;
int rc;
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
/* Remove any VLAN filters programmed */
for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
@@ -2677,16 +2687,18 @@ static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
static int
bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
{
- struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp);
int rc;
/* Destroy, recreate and reconfigure the default vnic */
- rc = bnxt_free_one_vnic(bp, 0);
+ rc = bnxt_free_one_vnic(bp, bp->vnic_queue_db.dflt_vnic_id);
if (rc)
return rc;
- /* default vnic 0 */
- rc = bnxt_setup_one_vnic(bp, 0);
+ /* setup the default vnic details */
+ bnxt_vnic_queue_db_update_dlft_vnic(bp);
+
+ rc = bnxt_setup_one_vnic(bp, bp->vnic_queue_db.dflt_vnic_id);
if (rc)
return rc;
@@ -2817,7 +2829,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
{
struct bnxt *bp = dev->data->dev_private;
/* Default Filter is tied to VNIC 0 */
- struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp);
int rc;
rc = is_bnxt_in_error(bp);
@@ -2867,7 +2879,7 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
if (rc)
return rc;
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
bp->nb_mc_addr = nb_mc_addr;
@@ -3029,8 +3041,7 @@ bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
struct bnxt *bp = eth_dev->data->dev_private;
- uint32_t rc;
- uint32_t i;
+ uint32_t rc = 0;
rc = is_bnxt_in_error(bp);
if (rc)
@@ -3048,30 +3059,17 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
/* Is there a change in mtu setting? */
if (eth_dev->data->mtu == new_mtu)
- return 0;
+ return rc;
if (new_mtu > RTE_ETHER_MTU)
bp->flags |= BNXT_FLAG_JUMBO;
else
bp->flags &= ~BNXT_FLAG_JUMBO;
- for (i = 0; i < bp->nr_vnics; i++) {
- struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
- uint16_t size = 0;
-
- vnic->mru = BNXT_VNIC_MRU(new_mtu);
- rc = bnxt_hwrm_vnic_cfg(bp, vnic);
- if (rc)
- break;
-
- size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
- size -= RTE_PKTMBUF_HEADROOM;
-
- if (size < new_mtu) {
- rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
- if (rc)
- return rc;
- }
+ rc = bnxt_vnic_mru_config(bp, new_mtu);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "failed to update mtu in vnic context\n");
+ return rc;
}
if (bnxt_hwrm_config_host_mtu(bp))
@@ -5312,9 +5310,11 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
{
int rc = 0;
- rc = bnxt_get_config(bp);
- if (rc)
- return rc;
+ if (reconfig_dev) {
+ rc = bnxt_get_config(bp);
+ if (rc)
+ return rc;
+ }
rc = bnxt_alloc_switch_domain(bp);
if (rc)
@@ -5756,7 +5756,7 @@ static int
bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
{
struct rte_kvargs *kvlist;
- int ret;
+ int ret = 0;
if (devargs == NULL)
return 0;
@@ -5825,22 +5825,6 @@ static int bnxt_drv_init(struct rte_eth_dev *eth_dev)
pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
bp->flags |= BNXT_FLAG_STINGRAY;
- if (BNXT_TRUFLOW_EN(bp)) {
- /* extra mbuf field is required to store CFA code from mark */
- static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = {
- .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME,
- .size = sizeof(bnxt_cfa_code_dynfield_t),
- .align = __alignof__(bnxt_cfa_code_dynfield_t),
- };
- bnxt_cfa_code_dynfield_offset =
- rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc);
- if (bnxt_cfa_code_dynfield_offset < 0) {
- PMD_DRV_LOG(ERR,
- "Failed to register mbuf field for TruFlow mark\n");
- return -rte_errno;
- }
- }
-
rc = bnxt_map_pci_bars(eth_dev);
if (rc) {
PMD_DRV_LOG(ERR,
@@ -5878,6 +5862,26 @@ static int bnxt_drv_init(struct rte_eth_dev *eth_dev)
if (rc)
return rc;
+ rc = bnxt_get_config(bp);
+ if (rc)
+ return rc;
+
+ if (BNXT_TRUFLOW_EN(bp)) {
+ /* extra mbuf field is required to store CFA code from mark */
+ static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = {
+ .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME,
+ .size = sizeof(bnxt_cfa_code_dynfield_t),
+ .align = __alignof__(bnxt_cfa_code_dynfield_t),
+ };
+ bnxt_cfa_code_dynfield_offset =
+ rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc);
+ if (bnxt_cfa_code_dynfield_offset < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to register mbuf field for TruFlow mark\n");
+ return -rte_errno;
+ }
+ }
+
return rc;
}
@@ -5912,6 +5916,9 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
bp = eth_dev->data->dev_private;
+ /* set the default app id */
+ bp->app_id = bnxt_ulp_default_app_id_get();
+
/* Parse dev arguments passed on when starting the DPDK application. */
rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs);
if (rc)
@@ -5948,7 +5955,8 @@ static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
if (!ctx)
return;
- rte_free(ctx->va);
+ if (ctx->va)
+ rte_free(ctx->va);
ctx->va = NULL;
ctx->dma = RTE_BAD_IOVA;
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index 587932c96f..57d704d90b 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -173,4 +173,8 @@ struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp,
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_NUM_VLANS
#define L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS \
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_NUM_VLANS
+#define CFA_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4
+#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE
#endif
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 4a107e81e9..28dd5ae6cb 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -677,7 +677,6 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
break;
}
break;
-
default:
break;
}
@@ -728,7 +727,7 @@ bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
struct bnxt_vnic_info *vnic0;
int i;
- vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic0 = bnxt_get_default_vnic(bp);
f0 = STAILQ_FIRST(&vnic0->filter);
/* This flow has same DST MAC as the port/l2 filter. */
@@ -905,6 +904,10 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic,
act,
"Failed to alloc VNIC group");
+ /* populate the fw group table */
+ bnxt_vnic_ring_grp_populate(bp, vnic);
+ bnxt_vnic_rules_init(vnic);
+
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
rte_flow_error_set(error, -rc,
@@ -1345,7 +1348,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
* The user specified redirect queue will be set while creating
* the ntuple filter in hardware.
*/
- vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic0 = bnxt_get_default_vnic(bp);
if (use_ntuple)
filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
else
@@ -1964,7 +1967,37 @@ bnxt_flow_create(struct rte_eth_dev *dev,
* in such a case.
*/
if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
- filter->enables == filter->tunnel_type) {
+ (filter->enables == filter->tunnel_type ||
+ filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
+ filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE)) {
+ if (filter->enables & NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT) {
+ struct rte_eth_udp_tunnel tunnel = {0};
+
+ /* hwrm_tunnel_dst_port_alloc converts to Big Endian */
+ tunnel.udp_port = BNXT_NTOHS(filter->dst_port);
+ if (filter->tunnel_type ==
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN) {
+ tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
+ } else if (filter->tunnel_type ==
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE) {
+ tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Invalid tunnel type");
+ ret = -EINVAL;
+ goto free_filter;
+ }
+ ret = bnxt_udp_tunnel_port_add_op(bp->eth_dev, &tunnel);
+ if (ret != 0) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Fail to add tunnel port");
+ goto free_filter;
+ }
+ }
ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
if (ret) {
rte_flow_error_set(error, -ret,
@@ -2147,8 +2180,38 @@ _bnxt_flow_destroy(struct bnxt *bp,
filter = flow->filter;
vnic = flow->vnic;
+ /* If tunnel redirection to a VF/PF is specified then only tunnel_type
+ * is set and enable is set to the tunnel type. Issue hwrm cmd directly
+ * in such a case.
+ */
if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
- filter->enables == filter->tunnel_type) {
+ (filter->enables == filter->tunnel_type ||
+ filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
+ filter->tunnel_type == CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE)) {
+ if (filter->enables & NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT) {
+ struct rte_eth_udp_tunnel tunnel = {0};
+
+ /* hwrm_tunnel_dst_port_free converts to Big Endian */
+ tunnel.udp_port = BNXT_NTOHS(filter->dst_port);
+ if (filter->tunnel_type ==
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN) {
+ tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
+ } else if (filter->tunnel_type ==
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE) {
+ tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Invalid tunnel type");
+ return ret;
+ }
+
+ ret = bnxt_udp_tunnel_port_del_op(bp->eth_dev,
+ &tunnel);
+ if (ret)
+ return ret;
+ }
ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
if (!ret)
goto done;
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 3f273df6f3..77588bdf49 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -668,6 +668,7 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
else
flags |=
HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+
req.flags = rte_cpu_to_le_32(flags);
req.enables = rte_cpu_to_le_32
(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
@@ -858,9 +859,11 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters);
- if (BNXT_PF(bp)) {
+ if (BNXT_PF(bp))
bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
- if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
+
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
+ if (BNXT_CHIP_P5(bp) || BNXT_PF(bp)) {
bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
HWRM_UNLOCK();
@@ -894,6 +897,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (bp->tunnel_disable_flag)
PMD_DRV_LOG(DEBUG, "Tunnel parsing capability is disabled, flags : %#x\n",
bp->tunnel_disable_flag);
+
unlock:
HWRM_UNLOCK();
@@ -951,6 +955,11 @@ int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;
+ if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP) {
+ bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS_TRUSTED_VF;
+ PMD_DRV_LOG(DEBUG, "Trusted VF's outer RSS capability is enabled\n");
+ }
+
if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP)
bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2;
@@ -1097,7 +1106,16 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
bp->tx_nr_rings +
BNXT_NUM_ASYNC_CPR(bp));
- req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
+ if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
+ req.num_vnics = rte_cpu_to_le_16(RTE_MIN(BNXT_VNIC_MAX_SUPPORTED_ID,
+ bp->max_vnics));
+ enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+ req.num_rsscos_ctxs = rte_cpu_to_le_16(RTE_MIN(BNXT_VNIC_MAX_SUPPORTED_ID,
+ bp->max_rsscos_ctx));
+ } else {
+ req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
+ }
+
if (bp->vf_resv_strategy ==
HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
@@ -1936,25 +1954,10 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cp
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- int rc = 0, i, j;
+ int rc = 0;
struct hwrm_vnic_alloc_input req = { 0 };
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- if (!BNXT_HAS_RING_GRPS(bp))
- goto skip_ring_grps;
-
- /* map ring groups to this vnic */
- PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
- vnic->start_grp_id, vnic->end_grp_id);
- for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
- vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
-
- vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
- vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
- vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
- vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
-
-skip_ring_grps:
vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
@@ -2068,7 +2071,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
*/
for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
rxq = bp->eth_dev->data->rx_queues[i];
- if (rxq->rx_started) {
+ if (rxq->rx_started &&
+ bnxt_vnic_queue_id_is_valid(vnic, i)) {
dflt_rxq = i;
break;
}
@@ -2298,14 +2302,22 @@ bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
- req.hash_mode_flags = vnic->hash_mode;
+ /* When the vnic_id in the request field is a valid
+ * one, the hash_mode_flags in the request field must
+ * be set to DEFAULT. And any request to change the
+ * default behavior must be done in a separate call
+ * to HWRM_VNIC_RSS_CFG by exclusively setting hash
+ * mode and vnic_id, rss_ctx_idx to INVALID.
+ */
+ req.hash_mode_flags = BNXT_HASH_MODE_DEFAULT;
req.hash_key_tbl_addr =
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
req.ring_grp_tbl_addr =
rte_cpu_to_le_64(vnic->rss_table_dma_addr +
- i * HW_HASH_INDEX_SIZE);
+ i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
+ 2 * sizeof(uint16_t));
req.ring_table_pair_index = i;
req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
@@ -2314,23 +2326,74 @@ bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "RSS CFG: Hash level %d\n", req.hash_mode_flags);
}
return rc;
}
-int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
- struct bnxt_vnic_info *vnic)
+static int
+bnxt_hwrm_vnic_rss_cfg_hash_mode_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- int rc = 0;
- struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+ int rc = 0;
- if (!vnic->rss_table)
+ /* The reason we are returning success here is that this
+ * call is in the context of user/stack RSS configuration.
+ * Even though OUTER RSS is not supported, the normal RSS
+ * configuration should continue to work.
+ */
+ if ((BNXT_CHIP_P5(bp) && BNXT_VNIC_OUTER_RSS_UNSUPPORTED(bp)) ||
+ (!BNXT_CHIP_P5(bp) && !(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS)))
return 0;
- if (BNXT_CHIP_P5(bp))
- return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
+ /* Don't call RSS hash level configuration if the current
+ * hash level is the same as the hash level that is requested.
+ */
+ if (vnic->prev_hash_mode == vnic->hash_mode)
+ return 0;
+
+ HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+ /* For FW, hash_mode == DEFAULT means that
+ * the FW is capable of doing INNER & OUTER RSS as well.
+ * DEFAULT doesn't mean that the FW is
+ * going to change the hash_mode to INNER. However, for
+ * the USER, DEFAULT means, change the hash mode to the
+ * NIC's DEFAULT hash mode which is INNER.
+ *
+ * Hence, driver should make the translation of hash_mode
+ * to INNERMOST when hash_mode from the dpdk stack is
+ * DEFAULT.
+ */
+ if (vnic->hash_mode == BNXT_HASH_MODE_DEFAULT)
+ req.hash_mode_flags = BNXT_HASH_MODE_INNERMOST;
+ else
+ req.hash_mode_flags = vnic->hash_mode;
+ req.vnic_id = rte_cpu_to_le_16(BNXT_DFLT_VNIC_ID_INVALID);
+ req.rss_ctx_idx = rte_cpu_to_le_16(BNXT_RSS_CTX_IDX_INVALID);
+
+ PMD_DRV_LOG(DEBUG, "RSS CFG: Hash level %d\n", req.hash_mode_flags);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+ BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+ /* Store the programmed hash_mode in prev_hash_mode so that
+ * it can be checked against the next user requested hash mode.
+ */
+ if (!rc)
+ vnic->prev_hash_mode = vnic->hash_mode;
+ HWRM_UNLOCK();
+ return rc;
+}
+
+static int
+bnxt_hwrm_vnic_rss_cfg_non_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
@@ -2352,6 +2415,39 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
return rc;
}
+int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+
+ if (!vnic->rss_table)
+ return 0;
+
+ if (BNXT_CHIP_P5(bp)) {
+ rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
+ if (rc)
+ return rc;
+ /* Configuring the hash mode has to be done in a
+ * different VNIC_RSS_CFG HWRM command by setting
+ * vnic_id & rss_ctx_id to INVALID. The only
+ * exception to this is if the USER doesn't want
+ * to change the default behavior. So, ideally
+ * bnxt_hwrm_vnic_rss_cfg_hash_mode_p5 should be
+ * called when user is explicitly changing the hash
+ * mode. However, this logic will unconditionally
+ * call bnxt_hwrm_vnic_rss_cfg_hash_mode_p5 to
+ * simplify the logic as there is no harm in calling
+ * bnxt_hwrm_vnic_rss_cfg_hash_mode_p5 even when
+ * user is not setting it explicitly. Because, this
+ * routine will convert the default value to inner
+ * which is our adapter's default behavior.
+ */
+ return bnxt_hwrm_vnic_rss_cfg_hash_mode_p5(bp, vnic);
+ }
+
+ return bnxt_hwrm_vnic_rss_cfg_non_p5(bp, vnic);
+}
+
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
struct bnxt_vnic_info *vnic)
{
@@ -2893,6 +2989,7 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
bnxt_hwrm_vnic_free(bp, vnic);
rte_free(vnic->fw_grp_ids);
+ vnic->fw_grp_ids = NULL;
}
/* Ring resources */
bnxt_free_all_hwrm_rings(bp);
@@ -3977,6 +4074,36 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
return rc;
}
+int bnxt_hwrm_tunnel_upar_id_get(struct bnxt *bp, uint8_t *upar_id,
+ uint8_t tunnel_type)
+{
+ struct hwrm_tunnel_dst_port_query_input req = {0};
+ struct hwrm_tunnel_dst_port_query_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_QUERY, BNXT_USE_CHIMP_MB);
+ req.tunnel_type = tunnel_type;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ switch (tunnel_type) {
+ case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI:
+ *upar_id = resp->upar_in_use;
+ break;
+ case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_SRV6:
+ *upar_id = resp->upar_in_use;
+ break;
+ default:
+ /* INVALID UPAR Id if another tunnel type tries to retrieve */
+ *upar_id = 0xff;
+ break;
+ }
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
uint8_t tunnel_type)
{
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index f9d9fe0ef2..68384bc757 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -109,7 +109,31 @@ struct bnxt_pf_resource_info {
uint32_t num_hw_ring_grps;
};
-#define BNXT_CTX_VAL_INVAL 0xFFFF
+#define BNXT_CTX_VAL_INVAL 0xFFFF
+#define BNXT_RSS_CTX_IDX_INVALID 0xFFFF
+
+#define BNXT_TUNNELED_OFFLOADS_CAP_VXLAN_EN(bp) \
+ (!((bp)->tunnel_disable_flag & HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN))
+#define BNXT_TUNNELED_OFFLOADS_CAP_NGE_EN(bp) \
+ (!((bp)->tunnel_disable_flag & HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_NGE))
+#define BNXT_TUNNELED_OFFLOADS_CAP_GRE_EN(bp) \
+ (!((bp)->tunnel_disable_flag & HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_GRE))
+#define BNXT_TUNNELED_OFFLOADS_CAP_IPINIP_EN(bp) \
+ (!((bp)->tunnel_disable_flag & HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP))
+
+/*
+ * If the device supports VXLAN, GRE, IPIP and GENEVE tunnel parsing, then report
+ * RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM and
+ * RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM in the Rx/Tx offload capabilities of the device.
+ */
+#define BNXT_TUNNELED_OFFLOADS_CAP_ALL_EN(bp) \
+ (BNXT_TUNNELED_OFFLOADS_CAP_VXLAN_EN(bp) && \
+ BNXT_TUNNELED_OFFLOADS_CAP_NGE_EN(bp) && \
+ BNXT_TUNNELED_OFFLOADS_CAP_GRE_EN(bp) && \
+ BNXT_TUNNELED_OFFLOADS_CAP_IPINIP_EN(bp))
+
+#define BNXT_SIG_MODE_NRZ HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_NRZ
+#define BNXT_SIG_MODE_PAM4 HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4
#define BNXT_TUNNELED_OFFLOADS_CAP_VXLAN_EN(bp) \
(!((bp)->tunnel_disable_flag & HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN))
@@ -227,6 +251,8 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf);
int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
uint8_t tunnel_type);
+int bnxt_hwrm_tunnel_upar_id_get(struct bnxt *bp, uint8_t *upar_id,
+ uint8_t tunnel_type);
int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
uint8_t tunnel_type);
int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf);
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 4cdbb177d9..686c3af4da 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -54,7 +54,7 @@ int bnxt_alloc_ring_grps(struct bnxt *bp)
return -EBUSY;
}
- /* THOR does not support ring groups.
+ /* P5 does not support ring groups.
* But we will use the array to save RSS context IDs.
*/
if (BNXT_CHIP_P5(bp)) {
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 99758dd304..0d0b5e28e4 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -400,7 +400,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->rx_started = rxq->rx_deferred_start ? false : true;
- rxq->vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ rxq->vnic = bnxt_get_default_vnic(bp);
return 0;
err:
@@ -460,6 +460,8 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
struct bnxt_vnic_info *vnic = NULL;
+ uint16_t vnic_idx = 0;
+ uint16_t fw_grp_id = 0;
int rc = 0;
rc = is_bnxt_in_error(bp);
@@ -471,6 +473,13 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
}
+ vnic = bnxt_vnic_queue_id_get_next(bp, rx_queue_id, &vnic_idx);
+ if (vnic == NULL) {
+ PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
+ rx_queue_id);
+ return -EINVAL;
+ }
+
/* reset the previous stats for the rx_queue since the counters
* will be cleared when the queue is started.
*/
@@ -490,29 +499,37 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return rc;
if (BNXT_HAS_RING_GRPS(bp))
- rxq->vnic->dflt_ring_grp = bp->grp_info[rx_queue_id].fw_grp_id;
- /* Reconfigure default receive ring and MRU. */
- bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
-
- PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+ fw_grp_id = bp->grp_info[rx_queue_id].fw_grp_id;
- if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
- vnic = rxq->vnic;
+ do {
+ if (BNXT_HAS_RING_GRPS(bp))
+ vnic->dflt_ring_grp = fw_grp_id;
+ /* Reconfigure default receive ring and MRU. */
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+
+ PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+ if (BNXT_HAS_RING_GRPS(bp)) {
+ if (vnic->fw_grp_ids[rx_queue_id] !=
+ INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(ERR, "invalid ring id %d\n",
+ rx_queue_id);
+ return 0;
+ }
- if (BNXT_HAS_RING_GRPS(bp)) {
- if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
- return 0;
+ vnic->fw_grp_ids[rx_queue_id] = fw_grp_id;
+ PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
+ vnic, fw_grp_id);
+ }
- vnic->fw_grp_ids[rx_queue_id] =
- bp->grp_info[rx_queue_id].fw_grp_id;
- PMD_DRV_LOG(DEBUG,
- "vnic = %p fw_grp_id = %d\n",
- vnic, bp->grp_info[rx_queue_id].fw_grp_id);
+ PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n",
+ vnic->rx_queue_cnt);
+ rc += bnxt_vnic_rss_queue_status_update(bp, vnic);
}
-
- PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
- rc = bnxt_vnic_rss_configure(bp, vnic);
- }
+ vnic_idx++;
+ } while ((vnic = bnxt_vnic_queue_id_get_next(bp, rx_queue_id,
+ &vnic_idx)) != NULL);
if (rc != 0) {
dev->data->rx_queue_state[rx_queue_id] =
@@ -535,6 +552,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct bnxt_vnic_info *vnic = NULL;
struct bnxt_rx_queue *rxq = NULL;
int active_queue_cnt = 0;
+ uint16_t vnic_idx = 0, q_id = rx_queue_id;
int i, rc = 0;
rc = is_bnxt_in_error(bp);
@@ -556,61 +574,64 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
}
- vnic = rxq->vnic;
+ vnic = bnxt_vnic_queue_id_get_next(bp, q_id, &vnic_idx);
if (!vnic) {
- PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
- rx_queue_id);
+ PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n", q_id);
return -EINVAL;
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ dev->data->rx_queue_state[q_id] = RTE_ETH_QUEUE_STATE_STOPPED;
rxq->rx_started = false;
PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
- if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
- if (BNXT_HAS_RING_GRPS(bp))
- vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
-
- PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
- rc = bnxt_vnic_rss_configure(bp, vnic);
- }
-
- /* Compute current number of active receive queues. */
- for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
- if (bp->rx_queues[i]->rx_started)
- active_queue_cnt++;
+ do {
+ active_queue_cnt = 0;
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+ if (BNXT_HAS_RING_GRPS(bp))
+ vnic->fw_grp_ids[q_id] = INVALID_HW_RING_ID;
- if (BNXT_CHIP_P5(bp)) {
- /*
- * For Thor, we need to ensure that the VNIC default receive
- * ring corresponds to an active receive queue. When no queue
- * is active, we need to temporarily set the MRU to zero so
- * that packets are dropped early in the receive pipeline in
- * order to prevent the VNIC default receive ring from being
- * accessed.
- */
- if (active_queue_cnt == 0) {
- uint16_t saved_mru = vnic->mru;
-
- /* clear RSS setting on vnic. */
- bnxt_vnic_rss_clear_p5(bp, vnic);
-
- vnic->mru = 0;
- /* Reconfigure default receive ring and MRU. */
- bnxt_hwrm_vnic_cfg(bp, vnic);
- vnic->mru = saved_mru;
- } else {
- /* Reconfigure default receive ring. */
- bnxt_hwrm_vnic_cfg(bp, vnic);
+ PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n",
+ vnic->rx_queue_cnt);
+ rc = bnxt_vnic_rss_queue_status_update(bp, vnic);
}
- } else if (active_queue_cnt) {
- /*
- * If the queue being stopped is the current default queue and
- * there are other active queues, pick one of them as the
- * default and reconfigure the vnic.
- */
- if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) {
- for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
+
+ /* Compute current number of active receive queues. */
+ for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
+ if (bp->rx_queues[i]->rx_started)
+ active_queue_cnt++;
+
+ if (BNXT_CHIP_P5(bp)) {
+ /*
+ * For P5, we need to ensure that the VNIC default
+ * receive ring corresponds to an active receive queue.
+ * When no queue is active, we need to temporarily set
+ * the MRU to zero so that packets are dropped early in
+ * the receive pipeline in order to prevent the VNIC
+ * default receive ring from being accessed.
+ */
+ if (active_queue_cnt == 0) {
+ uint16_t saved_mru = vnic->mru;
+
+ /* clear RSS setting on vnic. */
+ bnxt_vnic_rss_clear_p5(bp, vnic);
+
+ vnic->mru = 0;
+ /* Reconfigure default receive ring and MRU. */
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+ vnic->mru = saved_mru;
+ } else {
+ /* Reconfigure default receive ring. */
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+ }
+ } else if (active_queue_cnt && vnic->dflt_ring_grp ==
+ bp->grp_info[q_id].fw_grp_id) {
+ /*
+ * If the queue being stopped is the current default
+ * queue and there are other active queues, pick one of
+ * them as the default and reconfigure the vnic.
+ */
+ for (i = vnic->start_grp_id; i < vnic->end_grp_id;
+ i++) {
if (bp->rx_queues[i]->rx_started) {
vnic->dflt_ring_grp =
bp->grp_info[i].fw_grp_id;
@@ -619,7 +640,9 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
}
}
- }
+ vnic_idx++;
+ } while ((vnic = bnxt_vnic_queue_id_get_next(bp, q_id,
+ &vnic_idx)) != NULL);
if (rc == 0)
bnxt_rx_queue_release_mbufs(rxq);
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index a067278dca..1ab0ef2f5d 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -691,7 +691,7 @@ static void
bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- uint64_t last_hwrm_time;
+ uint64_t last_hwrm_time = 0;
uint64_t pkt_time = 0;
if (!BNXT_CHIP_P5(bp) || !ptp)
@@ -705,7 +705,6 @@ bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
* from the HWRM response with the lower 32 bits in the
* Rx completion to produce the 48 bit timestamp for the Rx packet
*/
- last_hwrm_time = ptp->current_time;
pkt_time = (last_hwrm_time & BNXT_PTP_CURRENT_TIME_MASK) | rx_ts_cmpl;
if (rx_ts_cmpl < (uint32_t)last_hwrm_time) {
/* timer has rolled over */
@@ -923,7 +922,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
RX_PKT_CMPL_FLAGS_MASK) ==
- RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
+ RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
bnxt_get_rx_ts_p5(rxq->bp, rxcmp1->reorder);
if (cmp_type == CMPL_BASE_TYPE_RX_L2_V2) {
@@ -1089,6 +1088,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
break;
}
+ cpr->cp_raw_cons = raw_cons;
if (!nb_rx_pkts && !nb_rep_rx_pkts && !evt) {
/*
* For PMD, there is no need to keep on pushing to REARM
@@ -1097,7 +1097,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
goto done;
}
- cpr->cp_raw_cons = raw_cons;
/* Ring the completion queue doorbell. */
bnxt_db_cq(cpr);
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index c8745add5e..d1d1fe8f1f 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 21c2217092..10b716a00b 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 75456df5bd..b9b8a9b1a2 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index b3c03a2af5..be9c127b64 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -9,8 +9,26 @@
#include <rte_malloc.h>
#include "bnxt.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_ring.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
+#include "bnxt_hwrm.h"
+
+/* Macros to manipulate vnic bitmaps */
+#define BNXT_VNIC_BITMAP_SIZE 64
+#define BNXT_VNIC_BITMAP_SET(b, i) ((b[(i) / BNXT_VNIC_BITMAP_SIZE]) |= \
+ (1UL << ((BNXT_VNIC_BITMAP_SIZE - 1) - \
+ ((i) % BNXT_VNIC_BITMAP_SIZE))))
+
+#define BNXT_VNIC_BITMAP_RESET(b, i) ((b[(i) / BNXT_VNIC_BITMAP_SIZE]) &= \
+ (~(1UL << ((BNXT_VNIC_BITMAP_SIZE - 1) - \
+ ((i) % BNXT_VNIC_BITMAP_SIZE)))))
+
+#define BNXT_VNIC_BITMAP_GET(b, i) (((b[(i) / BNXT_VNIC_BITMAP_SIZE]) >> \
+ ((BNXT_VNIC_BITMAP_SIZE - 1) - \
+ ((i) % BNXT_VNIC_BITMAP_SIZE))) & 1)
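+
+/* Note: the bitmaps above are MSB-first within each 64-bit word, e.g.
+ * BNXT_VNIC_BITMAP_SET(b, 0) sets bit 63 of b[0], and queue id 64 maps
+ * to bit 63 of b[1].
+ */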
/*
* VNIC Functions
@@ -51,6 +69,8 @@ static void bnxt_init_vnics(struct bnxt *bp)
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->hash_mode =
HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
+ vnic->prev_hash_mode =
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
vnic->rx_queue_cnt = 0;
STAILQ_INIT(&vnic->filter);
@@ -84,6 +104,11 @@ void bnxt_free_all_vnics(struct bnxt *bp)
for (i = 0; i < bp->max_vnics; i++) {
vnic = &bp->vnic_info[i];
STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
+ if (vnic->ref_cnt) {
+ /* clean up the default vnic details */
+ bnxt_vnic_rss_action_free(bp, i);
+ }
+
vnic->rx_queue_cnt = 0;
}
}
@@ -212,6 +237,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp)
int bnxt_vnic_grp_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
+ uint32_t i;
vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
if (!vnic->fw_grp_ids) {
@@ -220,7 +246,10 @@ int bnxt_vnic_grp_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
size);
return -ENOMEM;
}
- memset(vnic->fw_grp_ids, -1, size);
+
+ /* Initialize to invalid ring id */
+ for (i = 0; i < bp->max_ring_grps; i++)
+ vnic->fw_grp_ids[i] = INVALID_HW_RING_ID;
return 0;
}
@@ -259,30 +288,27 @@ int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
/* If FW has not advertised capability to configure outer/inner
* RSS hashing , just log a message. HW will work in default RSS mode.
*/
- if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS)) {
- PMD_DRV_LOG(ERR, "RSS hash level cannot be configured\n");
+ if ((BNXT_CHIP_P5(bp) && BNXT_VNIC_OUTER_RSS_UNSUPPORTED(bp)) ||
+ (!BNXT_CHIP_P5(bp) && !(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))) {
+ if (lvl)
+ PMD_DRV_LOG(INFO,
+ "Given RSS level is unsupported, using default RSS level\n");
return mode;
}
switch (lvl) {
case BNXT_RSS_LEVEL_INNERMOST:
- if (l3_and_l4 || l4)
- mode =
- HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4;
- else if (l3_only)
- mode =
- HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2;
+ /* Irrespective of what RTE says, FW always does 4 tuple */
+ if (l3_and_l4 || l4 || l3_only)
+ mode = BNXT_HASH_MODE_INNERMOST;
break;
case BNXT_RSS_LEVEL_OUTERMOST:
- if (l3_and_l4 || l4)
- mode =
- HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4;
- else if (l3_only)
- mode =
- HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2;
+ /* Irrespective of what RTE says, FW always does 4 tuple */
+ if (l3_and_l4 || l4 || l3_only)
+ mode = BNXT_HASH_MODE_OUTERMOST;
break;
default:
- mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
+ mode = BNXT_HASH_MODE_DEFAULT;
break;
}
@@ -296,7 +322,8 @@ uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
/* If FW has not advertised capability to configure inner/outer RSS
* return default hash mode.
*/
- if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))
+ if ((BNXT_CHIP_P5(bp) && BNXT_VNIC_OUTER_RSS_UNSUPPORTED(bp)) ||
+ (!BNXT_CHIP_P5(bp) && !(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS)))
return RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
@@ -310,3 +337,909 @@ uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
return rss_level;
}
+
+static
+int32_t bnxt_vnic_populate_rss_table_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ uint32_t ctx_idx = 0, rss_idx = 0, cnt = 0;
+ uint32_t q_id = -1;
+ struct bnxt_rx_queue *rxq;
+ uint16_t *ring_tbl = vnic->rss_table;
+ uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+ uint16_t ring_id;
+
+ /* For P5 platform */
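+ /* Each RSS table entry is a pair: the Rx ring id followed by the
+ * corresponding completion ring id.
+ */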
+ for (ctx_idx = 0; ctx_idx < vnic->num_lb_ctxts; ctx_idx++) {
+ for (rss_idx = 0; rss_idx < BNXT_RSS_ENTRIES_PER_CTX_P5;
+ rss_idx++) {
+ /* Find next active ring. */
+ for (cnt = 0; cnt < BNXT_VNIC_MAX_QUEUE_SIZE; cnt++) {
+ if (++q_id == bp->rx_nr_rings)
+ q_id = 0; /* reset the q_id */
+ if (BNXT_VNIC_BITMAP_GET(vnic->queue_bitmap,
+ q_id) &&
+ rx_queue_state[q_id] !=
+ RTE_ETH_QUEUE_STATE_STOPPED)
+ break;
+ }
+
+ /* no active queues, exit */
+ if (cnt == BNXT_VNIC_MAX_QUEUE_SIZE)
+ return 0;
+
+ rxq = bp->rx_queues[q_id];
+ ring_id = rxq->rx_ring->rx_ring_struct->fw_ring_id;
+ *ring_tbl++ = rte_cpu_to_le_16(ring_id);
+ ring_id = rxq->cp_ring->cp_ring_struct->fw_ring_id;
+ *ring_tbl++ = rte_cpu_to_le_16(ring_id);
+ }
+ }
+ return 0;
+}
+
+static
+int32_t bnxt_vnic_populate_rss_table_p4(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ uint32_t rss_idx = 0, cnt = 0;
+ uint32_t q_id = -1;
+ uint16_t *ring_tbl = vnic->rss_table;
+ uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+ uint16_t ring_id;
+
+ /* For Wh+ platform */
+ for (rss_idx = 0; rss_idx < bnxt_rss_hash_tbl_size(bp); rss_idx++) {
+ /* Find next active ring. */
+ for (cnt = 0; cnt < BNXT_VNIC_MAX_QUEUE_SIZE; cnt++) {
+ if (++q_id == bp->rx_nr_rings)
+ q_id = 0; /* reset the q_id */
+ if (BNXT_VNIC_BITMAP_GET(vnic->queue_bitmap,
+ q_id) &&
+ rx_queue_state[q_id] !=
+ RTE_ETH_QUEUE_STATE_STOPPED)
+ break;
+ }
+
+ /* no active queues, exit */
+ if (cnt == BNXT_VNIC_MAX_QUEUE_SIZE)
+ return 0;
+
+ ring_id = vnic->fw_grp_ids[q_id];
+ *ring_tbl++ = rte_cpu_to_le_16(ring_id);
+ }
+ return 0;
+}
+
+static
+int32_t bnxt_vnic_populate_rss_table(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ /* RSS table population is different for p4 and p5 platforms */
+ if (BNXT_CHIP_P5(bp))
+ return bnxt_vnic_populate_rss_table_p5(bp, vnic);
+
+ return bnxt_vnic_populate_rss_table_p4(bp, vnic);
+}
+
+static void
+bnxt_vnic_queue_delete(struct bnxt *bp, uint16_t vnic_idx)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_idx];
+
+ if (bnxt_hwrm_vnic_free(bp, vnic))
+ PMD_DRV_LOG(ERR, "Failed to delete queue\n");
+
+ if (vnic->fw_grp_ids) {
+ rte_free(vnic->fw_grp_ids);
+ vnic->fw_grp_ids = NULL;
+ }
+
+ vnic->rx_queue_cnt = 0;
+ if (bp->nr_vnics)
+ bp->nr_vnics--;
+
+ /* reset the queue_bitmap */
+ memset(vnic->queue_bitmap, 0, sizeof(vnic->queue_bitmap));
+}
+
+static struct bnxt_vnic_info*
+bnxt_vnic_queue_create(struct bnxt *bp, int32_t vnic_id, uint16_t q_index)
+{
+ uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_rx_queue *rxq = NULL;
+ int32_t rc = -EINVAL;
+ uint16_t saved_mru = 0;
+
+ vnic = &bp->vnic_info[vnic_id];
+ if (vnic->rx_queue_cnt) {
+ PMD_DRV_LOG(ERR, "invalid queue configuration %d\n", vnic_id);
+ return NULL;
+ }
+
+ /* set the queue_bitmap */
+ BNXT_VNIC_BITMAP_SET(vnic->queue_bitmap, q_index);
+
+ rxq = bp->rx_queues[q_index];
+ if (rx_queue_state[q_index] == RTE_ETH_QUEUE_STATE_STOPPED)
+ rxq->rx_started = 0;
+ else
+ rxq->rx_started = 1;
+
+ vnic->rx_queue_cnt++;
+ vnic->start_grp_id = q_index;
+ vnic->end_grp_id = q_index + 1;
+ vnic->func_default = 0; /* This is not a default VNIC. */
+ bp->nr_vnics++;
+
+ /* Allocate vnic group for p4 platform */
+ rc = bnxt_vnic_grp_alloc(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(DEBUG, "Failed to allocate vnic groups\n");
+ goto cleanup;
+ }
+
+ /* populate the fw group table */
+ bnxt_vnic_ring_grp_populate(bp, vnic);
+ bnxt_vnic_rules_init(vnic);
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(DEBUG, "Failed to allocate vnic %d\n", q_index);
+ goto cleanup;
+ }
+
+ /* store the mru so we can set it to zero in hw */
+ if (rxq->rx_started == 0) {
+ saved_mru = vnic->mru;
+ vnic->mru = 0;
+ }
+
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rxq->rx_started == 0)
+ vnic->mru = saved_mru;
+
+ if (rc) {
+ PMD_DRV_LOG(DEBUG, "Failed to configure vnic %d\n", q_index);
+ goto cleanup;
+ }
+
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(DEBUG, "Failed to configure vnic plcmode %d\n",
+ q_index);
+ goto cleanup;
+ }
+
+ vnic->ref_cnt++;
+ return vnic;
+
+cleanup:
+ bnxt_vnic_queue_delete(bp, vnic_id);
+ return NULL;
+}
+
+static inline int32_t
+bnxt_vnic_queue_db_lookup(struct bnxt *bp, uint64_t *q_list)
+{
+ /* lookup in the database to check if it is in use */
+ return rte_hash_lookup(bp->vnic_queue_db.rss_q_db,
+ (const void *)q_list);
+}
+
+static inline int32_t
+bnxt_vnic_queue_db_del(struct bnxt *bp, uint64_t *q_list)
+{
+ return rte_hash_del_key(bp->vnic_queue_db.rss_q_db,
+ (const void *)q_list);
+}
+
+static int32_t
+bnxt_vnic_queue_db_add(struct bnxt *bp, uint64_t *q_list)
+{
+ struct bnxt_vnic_info *vnic_info;
+ int32_t vnic_id, rc = -1;
+
+ vnic_id = rte_hash_add_key(bp->vnic_queue_db.rss_q_db,
+ (const void *)q_list);
+
+ if (vnic_id < 0 || vnic_id >= bp->max_vnics) {
+ PMD_DRV_LOG(DEBUG, "unable to assign vnic index %d\n",
+ vnic_id);
+ return rc;
+ }
+
+ vnic_info = &bp->vnic_info[vnic_id];
+ if (vnic_info->fw_vnic_id != INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(DEBUG, "Invalid ring id for %d.\n", vnic_id);
+ return rc;
+ }
+ return vnic_id;
+}
+
+/* Function to validate the incoming rss configuration */
+static
+int32_t bnxt_vnic_queue_db_rss_validate(struct bnxt *bp,
+ struct bnxt_vnic_rss_info *rss_info,
+ int32_t *vnic_idx)
+{
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ int32_t rc = -EINVAL;
+ uint32_t idx = 0;
+ int32_t out_idx;
+
+ if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS)) {
+ PMD_DRV_LOG(ERR, "Error Rss is not supported on this port\n");
+ return rc;
+ }
+
+ /* if the rss queue count is zero then use the default vnic */
+ if (rss_info->queue_num == 0) {
+ *vnic_idx = 0;
+ return 0;
+ }
+
+ /* Check that the number of queues is within the supported range */
+ if (rss_info->queue_num > bp->rx_nr_rings) {
+ PMD_DRV_LOG(ERR, "Error unsupported queue num.\n");
+ return rc;
+ }
+
+ /* validate the queue ids are in correct range */
+ for (idx = 0; idx < BNXT_VNIC_MAX_QUEUE_SIZE; idx++) {
+ if (BNXT_VNIC_BITMAP_GET(rss_info->queue_list, idx)) {
+ if (idx >= bp->rx_nr_rings) {
+ PMD_DRV_LOG(ERR,
+ "Error %d beyond support size %u\n",
+ idx, bp->rx_nr_rings);
+ return rc;
+ }
+ }
+ }
+
+ /* check if the vnic already exists */
+ out_idx = bnxt_vnic_queue_db_lookup(bp, rss_info->queue_list);
+ if (out_idx < 0 || out_idx >= bp->max_vnics)
+ return -ENOENT; /* entry not found */
+
+ /* found an entry */
+ *vnic_idx = out_idx;
+ return 0;
+}
+
+static void
+bnxt_vnic_rss_delete(struct bnxt *bp, uint16_t q_index)
+{
+ struct bnxt_vnic_info *vnic;
+
+ vnic = &bp->vnic_info[q_index];
+ if (vnic->rx_queue_cnt >= 1)
+ bnxt_hwrm_vnic_ctx_free(bp, vnic);
+
+ if (vnic->fw_vnic_id != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_free(bp, vnic);
+
+ if (vnic->fw_grp_ids) {
+ rte_free(vnic->fw_grp_ids);
+ vnic->fw_grp_ids = NULL;
+ }
+
+ /* Update the vnic details for all the rx queues */
+ vnic->rx_queue_cnt = 0;
+ memset(vnic->queue_bitmap, 0, sizeof(vnic->queue_bitmap));
+
+ if (bp->nr_vnics)
+ bp->nr_vnics--;
+}
+
+/* The rss_info must be validated before calling this function. */
+static struct bnxt_vnic_info *
+bnxt_vnic_rss_create(struct bnxt *bp,
+ struct bnxt_vnic_rss_info *rss_info,
+ uint16_t vnic_id)
+{
+ uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_rx_queue *rxq = NULL;
+ uint32_t idx, nr_ctxs, config_rss = 0;
+ uint16_t saved_mru = 0;
+ uint16_t active_q_cnt = 0;
+ int16_t first_q = -1;
+ int16_t end_q = -1;
+ int32_t rc = 0;
+
+ /* Assign the vnic to be used for this rss configuration */
+ vnic = &bp->vnic_info[vnic_id];
+
+ /* Update the vnic details for all the rx queues */
+ for (idx = 0; idx < BNXT_VNIC_MAX_QUEUE_SIZE; idx++) {
+ if (BNXT_VNIC_BITMAP_GET(rss_info->queue_list, idx)) {
+ rxq = bp->rx_queues[idx];
+ if (rx_queue_state[idx] ==
+ RTE_ETH_QUEUE_STATE_STOPPED) {
+ rxq->rx_started = 0;
+ } else {
+ rxq->rx_started = 1;
+ active_q_cnt++;
+ }
+ vnic->rx_queue_cnt++;
+
+ /* Update the queue list */
+ BNXT_VNIC_BITMAP_SET(vnic->queue_bitmap, idx);
+ if (first_q == -1)
+ first_q = idx;
+ end_q = idx;
+ }
+ }
+ vnic->start_grp_id = first_q;
+ vnic->end_grp_id = end_q + 1;
+ vnic->func_default = 0; /* This is not a default VNIC. */
+ bp->nr_vnics++;
+
+ /* Allocate vnic group for p4 platform */
+ rc = bnxt_vnic_grp_alloc(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to allocate vnic groups\n");
+ goto fail_cleanup;
+ }
+
+ /* populate the fw group table */
+ bnxt_vnic_ring_grp_populate(bp, vnic);
+ bnxt_vnic_rules_init(vnic);
+
+ /* Allocate the vnic in the firmware */
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to allocate vnic %d\n", idx);
+ goto fail_cleanup;
+ }
+
+ /* Allocate the vnic rss context */
+ /* RSS table size in P5 is 512. Cap max Rx rings to same value */
+ nr_ctxs = bnxt_rss_ctxts(bp);
+ for (idx = 0; idx < nr_ctxs; idx++) {
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, idx);
+ if (rc)
+ break;
+ }
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM ctx %d alloc failure rc: %x\n", idx, rc);
+ goto fail_cleanup;
+ }
+ vnic->num_lb_ctxts = nr_ctxs;
+
+ saved_mru = vnic->mru;
+ if (!active_q_cnt)
+ vnic->mru = 0;
+
+ /* configure the vnic details in firmware */
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ vnic->mru = saved_mru;
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to configure vnic %d\n", idx);
+ goto fail_cleanup;
+ }
+
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to configure vnic plcmode %d\n",
+ idx);
+ goto fail_cleanup;
+ }
+
+ /* hwrm_type conversion */
+ vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_info->rss_types);
+ vnic->hash_mode = bnxt_rte_to_hwrm_hash_level(bp, rss_info->rss_types,
+ rss_info->rss_level);
+
+ /* configure the key */
+ if (!rss_info->key_len)
+ /* If a hash key has not been specified, use a random hash key. */
+ bnxt_prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+ else
+ memcpy(vnic->rss_hash_key, rss_info->key, rss_info->key_len);
+
+ /* Prepare the indirection table */
+ bnxt_vnic_populate_rss_table(bp, vnic);
+
+ /* check to see if there is at least one queue that is active */
+ for (idx = vnic->start_grp_id; idx < vnic->end_grp_id; idx++) {
+ if (bnxt_vnic_queue_id_is_valid(vnic, idx) &&
+ bp->rx_queues[idx]->rx_started) {
+ config_rss = 1;
+ break;
+ }
+ }
+
+ /* configure the rss table */
+ if (config_rss) {
+ rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ if (rc) {
+ memset(vnic->rss_hash_key, 0, HW_HASH_KEY_SIZE);
+ PMD_DRV_LOG(ERR,
+ "Failed to configure vnic rss details %d\n",
+ idx);
+ goto fail_cleanup;
+ }
+ }
+
+ vnic->ref_cnt++;
+ return vnic;
+
+fail_cleanup:
+ bnxt_vnic_rss_delete(bp, idx);
+ return NULL;
+}
+
+int32_t
+bnxt_vnic_rss_queue_status_update(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ return 0;
+
+ if (!(vnic->rss_table && vnic->hash_type))
+ return 0;
+
+ /* Prepare the indirection table */
+ bnxt_vnic_populate_rss_table(bp, vnic);
+
+ /* configure the rss table */
+ if (bnxt_hwrm_vnic_rss_cfg(bp, vnic)) {
+ PMD_DRV_LOG(DEBUG, "Failed to update vnic rss details\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int32_t
+bnxt_vnic_rss_hash_algo_update(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ struct bnxt_vnic_rss_info *rss_info)
+{
+ uint8_t old_rss_hash_key[HW_HASH_KEY_SIZE] = { 0 };
+ uint16_t hash_type;
+ uint8_t hash_mode;
+ uint32_t apply = 0;
+
+ /* validate key length */
+ if (rss_info->key_len != 0 && rss_info->key_len != HW_HASH_KEY_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "Invalid hashkey length, should be %d bytes\n",
+ HW_HASH_KEY_SIZE);
+ return -EINVAL;
+ }
+
+ /* hwrm_type conversion */
+ hash_type = bnxt_rte_to_hwrm_hash_types(rss_info->rss_types);
+ hash_mode = bnxt_rte_to_hwrm_hash_level(bp, rss_info->rss_types,
+ rss_info->rss_level);
+ if (vnic->hash_mode != hash_mode ||
+ vnic->hash_type != hash_type) {
+ apply = 1;
+ vnic->hash_mode = hash_mode;
+ vnic->hash_type = hash_type;
+ }
+ /* Store the old hash key before programming the new one. It will
+ * be used to restore the old hash key when HWRM_VNIC_RSS_CFG
+ * fails.
+ */
+ memcpy(old_rss_hash_key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+ if (rss_info->key_len != 0 && memcmp(rss_info->key, vnic->rss_hash_key,
+ HW_HASH_KEY_SIZE)) {
+ apply = 1;
+ memcpy(vnic->rss_hash_key, rss_info->key, HW_HASH_KEY_SIZE);
+ }
+
+ if (apply) {
+ if (bnxt_hwrm_vnic_rss_cfg(bp, vnic)) {
+ memcpy(vnic->rss_hash_key, old_rss_hash_key, HW_HASH_KEY_SIZE);
+ BNXT_TF_DBG(ERR, "Error configuring vnic RSS config\n");
+ return -EINVAL;
+ }
+ BNXT_TF_DBG(INFO, "Rss config successfully applied\n");
+ }
+ return 0;
+}
+
+int32_t bnxt_vnic_queue_db_deinit(struct bnxt *bp)
+{
+ if (bp->vnic_queue_db.rss_q_db != NULL)
+ rte_hash_free(bp->vnic_queue_db.rss_q_db);
+ return 0;
+}
+
+int32_t bnxt_vnic_queue_db_init(struct bnxt *bp)
+{
+ struct rte_hash_parameters hash_tbl_params = {0};
+ char hash_tbl_name[64] = {0};
+
+ /* use the lower of the two limits */
+ if (bp->rx_nr_rings > BNXT_VNIC_MAX_QUEUE_SIZE)
+ bp->vnic_queue_db.num_queues = BNXT_VNIC_MAX_QUEUE_SIZE;
+ else
+ bp->vnic_queue_db.num_queues = bp->rx_nr_rings;
+
+ /* create the hash table for the rss hash entries */
+ snprintf(hash_tbl_name, sizeof(hash_tbl_name),
+ "bnxt_rss_hash_%d", bp->eth_dev->data->port_id);
+ hash_tbl_params.name = hash_tbl_name;
+ hash_tbl_params.entries = (bp->max_vnics > BNXT_VNIC_MAX_SUPPORTED_ID) ?
+ BNXT_VNIC_MAX_SUPPORTED_ID : bp->max_vnics;
+ hash_tbl_params.key_len = BNXT_VNIC_MAX_QUEUE_SZ_IN_8BITS;
+ hash_tbl_params.socket_id = rte_socket_id();
+ bp->vnic_queue_db.rss_q_db = rte_hash_create(&hash_tbl_params);
+ if (bp->vnic_queue_db.rss_q_db == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to create rss hash tbl\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void bnxt_vnic_queue_db_update_dlft_vnic(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *dflt_vnic;
+ uint64_t bitmap[BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS];
+ uint32_t idx;
+ int32_t vnic_id;
+
+ /* populate all the queue ids in the default vnic */
+ memset(bitmap, 0, sizeof(bitmap));
+ for (idx = 0; idx < bp->vnic_queue_db.num_queues; idx++)
+ BNXT_VNIC_BITMAP_SET(bitmap, idx);
+
+ vnic_id = bnxt_vnic_queue_db_add(bp, bitmap);
+ if (vnic_id < 0) {
+ PMD_DRV_LOG(ERR, "Unable to alloc vnic for default rss\n");
+ return;
+ }
+
+ dflt_vnic = bnxt_vnic_queue_db_get_vnic(bp, vnic_id);
+ if (dflt_vnic == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid vnic for default rss %d\n", vnic_id);
+ return;
+ }
+ /* Update the default vnic structure */
+ bp->vnic_queue_db.dflt_vnic_id = vnic_id;
+ memcpy(dflt_vnic->queue_bitmap, bitmap, sizeof(bitmap));
+ dflt_vnic->rx_queue_cnt = bp->vnic_queue_db.num_queues;
+ dflt_vnic->ref_cnt++;
+}
+
+int32_t bnxt_vnic_queue_action_alloc(struct bnxt *bp,
+ uint16_t q_index,
+ uint16_t *vnic_idx,
+ uint16_t *vnicid)
+{
+ uint64_t queue_list[BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS] = {0};
+ struct bnxt_vnic_info *vnic_info;
+ int32_t idx;
+ int32_t rc = -EINVAL;
+
+ /* validate the given queue id */
+ if (q_index >= bp->rx_nr_rings || q_index >= BNXT_VNIC_MAX_QUEUE_SIZE) {
+ PMD_DRV_LOG(ERR, "invalid queue id should be less than %d\n",
+ bp->rx_nr_rings);
+ return rc;
+ }
+
+ /* Populate the queue list */
+ BNXT_VNIC_BITMAP_SET(queue_list, q_index);
+
+ /* check to see if the q_index is already in use */
+ idx = bnxt_vnic_queue_db_lookup(bp, queue_list);
+ if (idx < 0) {
+ /* Assign the vnic slot */
+ idx = bnxt_vnic_queue_db_add(bp, queue_list);
+ if (idx < 0) {
+ PMD_DRV_LOG(DEBUG, "Unable to alloc vnic for queue\n");
+ return rc;
+ }
+
+ /* Allocate a new one */
+ vnic_info = bnxt_vnic_queue_create(bp, idx, q_index);
+ if (!vnic_info) {
+ PMD_DRV_LOG(ERR, "failed to create vnic - %d\n",
+ q_index);
+ bnxt_vnic_queue_db_del(bp, queue_list);
+ return rc; /* failed */
+ }
+ } else {
+ vnic_info = bnxt_vnic_queue_db_get_vnic(bp, idx);
+ if (vnic_info == NULL) {
+ PMD_DRV_LOG(ERR, "Unable to lookup vnic for queue %d\n",
+ q_index);
+ return rc;
+ }
+ /* increment the reference count and return the vnic id */
+ vnic_info->ref_cnt++;
+ }
+ *vnic_idx = (uint16_t)idx;
+ *vnicid = vnic_info->fw_vnic_id;
+ return 0;
+}
+
+int32_t
+bnxt_vnic_queue_action_free(struct bnxt *bp, uint16_t vnic_id)
+{
+ struct bnxt_vnic_info *vnic_info;
+ int32_t rc = -EINVAL;
+ int32_t vnic_idx = vnic_id, idx;
+
+ /* validate the given vnic idx */
+ if (vnic_idx >= bp->max_vnics) {
+ PMD_DRV_LOG(ERR, "invalid vnic idx %d\n", vnic_idx);
+ return rc;
+ }
+
+ /* validate the vnic info */
+ vnic_info = &bp->vnic_info[vnic_idx];
+ if (!vnic_info->rx_queue_cnt) {
+ PMD_DRV_LOG(ERR, "Invalid vnic idx, no queues being used\n");
+ return rc;
+ }
+ if (vnic_info->ref_cnt) {
+ vnic_info->ref_cnt--;
+ if (!vnic_info->ref_cnt) {
+ idx = bnxt_vnic_queue_db_del(bp,
+ vnic_info->queue_bitmap);
+ /* Check to ensure there is no corruption */
+ if (idx != vnic_idx)
+ PMD_DRV_LOG(ERR, "bad vnic idx %d\n", vnic_idx);
+
+ bnxt_vnic_queue_delete(bp, vnic_idx);
+ }
+ }
+ return 0;
+}
+
+int32_t
+bnxt_vnic_rss_action_alloc(struct bnxt *bp,
+ struct bnxt_vnic_rss_info *rss_info,
+ uint16_t *vnic_idx,
+ uint16_t *vnicid)
+{
+ struct bnxt_vnic_info *vnic_info = NULL;
+ int32_t rc = -EINVAL;
+ int32_t idx;
+
+ /* validate the given parameters */
+ rc = bnxt_vnic_queue_db_rss_validate(bp, rss_info, &idx);
+ if (rc == -EINVAL) {
+ PMD_DRV_LOG(ERR, "Failed to apply the rss action.\n");
+ return rc;
+ } else if (rc == -ENOENT) {
+ /* Allocate a new entry */
+ idx = bnxt_vnic_queue_db_add(bp, rss_info->queue_list);
+ if (idx < 0) {
+ PMD_DRV_LOG(DEBUG, "Unable to alloc vnic for rss\n");
+ return rc;
+ }
+ /* create the rss vnic */
+ vnic_info = bnxt_vnic_rss_create(bp, rss_info, idx);
+ if (!vnic_info) {
+ PMD_DRV_LOG(ERR, "Failed to create rss action.\n");
+ bnxt_vnic_queue_db_del(bp, rss_info->queue_list);
+ return rc;
+ }
+ } else {
+ vnic_info = bnxt_vnic_queue_db_get_vnic(bp, idx);
+ if (vnic_info == NULL) {
+ PMD_DRV_LOG(ERR, "Unable to lookup vnic for idx %d\n",
+ idx);
+ return rc;
+ }
+ /* increment the reference count and return the vnic id */
+ vnic_info->ref_cnt++;
+
+ /* if the configuration has changed then update the hash details */
+ rc = bnxt_vnic_rss_hash_algo_update(bp, vnic_info, rss_info);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to update the rss action.\n");
+ return rc;
+ }
+ }
+ *vnic_idx = idx;
+ *vnicid = vnic_info->fw_vnic_id;
+ return 0;
+}
+
+/* Delete the vnic associated with the given rss action index */
+int32_t
+bnxt_vnic_rss_action_free(struct bnxt *bp, uint16_t vnic_id)
+{
+ uint64_t bitmap[BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS];
+ struct bnxt_vnic_info *vnic_info;
+ int32_t rc = -EINVAL;
+ uint64_t *q_list;
+ int32_t idx = 0;
+
+ /* validate the given vnic id */
+ if (vnic_id >= bp->max_vnics) {
+ PMD_DRV_LOG(ERR, "invalid vnic id %d\n", vnic_id);
+ return rc;
+ }
+
+ /* validate vnic info */
+ vnic_info = &bp->vnic_info[vnic_id];
+ if (!vnic_info->rx_queue_cnt) {
+ PMD_DRV_LOG(ERR, "Invalid vnic id, not using any queues\n");
+ return rc;
+ }
+
+ if (vnic_info->ref_cnt) {
+ vnic_info->ref_cnt--;
+ if (!vnic_info->ref_cnt) {
+ if (bp->vnic_queue_db.dflt_vnic_id == vnic_id) {
+ /* In the case of the default vnic, the queue
+ * list can be changed by reta config, so a
+ * list with all queues populated is needed.
+ */
+ memset(bitmap, 0, sizeof(bitmap));
+ for (idx = 0;
+ idx < bp->vnic_queue_db.num_queues;
+ idx++)
+ BNXT_VNIC_BITMAP_SET(bitmap, idx);
+ q_list = bitmap;
+ } else {
+ q_list = vnic_info->queue_bitmap;
+ }
+ idx = bnxt_vnic_queue_db_del(bp, q_list);
+
+ /* check to ensure there is no corruption */
+ if (idx != vnic_id)
+ PMD_DRV_LOG(ERR, "bad vnic idx %d\n", vnic_id);
+ bnxt_vnic_rss_delete(bp, vnic_id);
+ }
+ }
+ return 0;
+}
+
+int32_t
+bnxt_vnic_reta_config_update(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic_info,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint64_t l_bitmap[BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS] = {0};
+ uint16_t i, sft, idx;
+ uint16_t q_id;
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ sft = i % RTE_ETH_RETA_GROUP_SIZE;
+
+ if (!(reta_conf[idx].mask & (1ULL << sft)))
+ continue;
+
+ q_id = reta_conf[idx].reta[sft];
+ if (q_id >= bp->vnic_queue_db.num_queues ||
+ !bp->eth_dev->data->rx_queues[q_id]) {
+ PMD_DRV_LOG(ERR, "Queue id %d is invalid\n", q_id);
+ return -EINVAL;
+ }
+ BNXT_VNIC_BITMAP_SET(l_bitmap, q_id);
+ }
+ /* update the queue bitmap after the validation */
+ memcpy(vnic_info->queue_bitmap, l_bitmap, sizeof(l_bitmap));
+ return 0;
+}
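+
+/* Example: with RTE_ETH_RETA_GROUP_SIZE == 64, RETA entry 70 lives in
+ * reta_conf[1], slot 6, and is honored only when bit 6 of
+ * reta_conf[1].mask is set.
+ */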
+
+int32_t
+bnxt_vnic_queue_id_is_valid(struct bnxt_vnic_info *vnic_info,
+ uint16_t queue_id)
+{
+ if (BNXT_VNIC_BITMAP_GET(vnic_info->queue_bitmap, queue_id))
+ return 1;
+ return 0;
+}
+
+void
+bnxt_vnic_ring_grp_populate(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ uint32_t i;
+
+ /* check if ring group is supported */
+ if (!BNXT_HAS_RING_GRPS(bp))
+ return;
+
+ /* map ring groups to this vnic */
+ for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
+ if (bnxt_vnic_queue_id_is_valid(vnic, i) &&
+ bp->rx_queues[i]->rx_started)
+ vnic->fw_grp_ids[i] = bp->grp_info[i].fw_grp_id;
+
+ vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
+}
+
+void
+bnxt_vnic_rules_init(struct bnxt_vnic_info *vnic)
+{
+ vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
+}
+
+int32_t
+bnxt_vnic_mru_config(struct bnxt *bp, uint16_t new_mtu)
+{
+ struct bnxt_vnic_info *vnic;
+ uint16_t size = 0;
+ int32_t rc = 0;
+ uint32_t i;
+
+ for (i = 0; i < bp->max_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ if (vnic->fw_vnic_id == INVALID_VNIC_ID)
+ continue;
+
+ vnic->mru = BNXT_VNIC_MRU(new_mtu);
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc)
+ break;
+
+ size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+ size -= RTE_PKTMBUF_HEADROOM;
+
+ if (size < new_mtu) {
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc)
+ break;
+ }
+ }
+ return rc;
+}
+
+struct bnxt_vnic_info *
+bnxt_vnic_queue_db_get_vnic(struct bnxt *bp, uint16_t vnic_idx)
+{
+ struct bnxt_vnic_info *vnic_info;
+
+ if (vnic_idx >= bp->max_vnics) {
+ PMD_DRV_LOG(ERR, "invalid vnic index %u\n", vnic_idx);
+ return NULL;
+ }
+ vnic_info = &bp->vnic_info[vnic_idx];
+ return vnic_info;
+}
+
+struct bnxt_vnic_info *
+bnxt_vnic_queue_id_get_next(struct bnxt *bp, uint16_t queue_id,
+ uint16_t *vnic_idx)
+{
+ struct bnxt_vnic_info *vnic = NULL;
+ uint16_t i = *vnic_idx;
+
+ while (i < bp->max_vnics) {
+ vnic = &bp->vnic_info[i];
+ if (vnic->ref_cnt && BNXT_VNIC_BITMAP_GET(vnic->queue_bitmap,
+ queue_id)) {
+ /* found a vnic that has the queue id */
+ *vnic_idx = i;
+ return vnic;
+ }
+ i++;
+ }
+ return NULL;
+}
+
+void
+bnxt_vnic_tpa_cfg(struct bnxt *bp, uint16_t queue_id, bool flag)
+{
+ struct bnxt_vnic_info *vnic = NULL;
+ uint16_t vnic_idx = 0;
+
+ while ((vnic = bnxt_vnic_queue_id_get_next(bp, queue_id,
+ &vnic_idx)) != NULL) {
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, flag);
+ vnic_idx++;
+ }
+}
+
+inline struct bnxt_vnic_info *
+bnxt_get_default_vnic(struct bnxt *bp)
+{
+ return &bp->vnic_info[bp->vnic_queue_db.dflt_vnic_id];
+}
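
A minimal usage sketch of the new queue-action helpers (illustrative only,
not part of the patch; error handling trimmed). Repeated allocations for the
same queue return the same VNIC with its reference count bumped, so a
matching number of frees is needed before the VNIC is torn down:

	uint16_t vnic_idx, fw_vnic_id;

	/* First call creates and configures the VNIC backing queue 3. */
	bnxt_vnic_queue_action_alloc(bp, 3, &vnic_idx, &fw_vnic_id);
	/* A second call for the same queue only increments ref_cnt. */
	bnxt_vnic_queue_action_alloc(bp, 3, &vnic_idx, &fw_vnic_id);

	/* Two frees are required before the VNIC is actually deleted. */
	bnxt_vnic_queue_action_free(bp, vnic_idx);
	bnxt_vnic_queue_action_free(bp, vnic_idx);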
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 9055b93c4b..4396d95bda 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -8,11 +8,29 @@
#include <sys/queue.h>
#include <stdbool.h>
+#include <rte_hash.h>
-#define INVALID_VNIC_ID ((uint16_t)-1)
+#define INVALID_VNIC_ID ((uint16_t)-1)
+#define BNXT_RSS_LEVEL_INNERMOST 0x2
+#define BNXT_RSS_LEVEL_OUTERMOST 0x1
+#define BNXT_VNIC_MAX_QUEUE_SIZE 256
+#define BNXT_VNIC_MAX_QUEUE_SZ_IN_8BITS (BNXT_VNIC_MAX_QUEUE_SIZE / 8)
+#define BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS (BNXT_VNIC_MAX_QUEUE_SIZE / 64)
+/* Limit the number of vnic creations */
+#define BNXT_VNIC_MAX_SUPPORTED_ID 64
-#define BNXT_RSS_LEVEL_INNERMOST 0x2
-#define BNXT_RSS_LEVEL_OUTERMOST 0x1
+#define BNXT_HASH_MODE_DEFAULT HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT
+#define BNXT_HASH_MODE_INNERMOST \
+ (HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4 | \
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2)
+#define BNXT_HASH_MODE_OUTERMOST \
+ (HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4 | \
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2)
+#define BNXT_VNIC_OUTER_RSS_UNSUPPORTED(bp) \
+ ((BNXT_PF(bp) && !((bp)->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS)) || \
+ (BNXT_VF(bp) && BNXT_VF_IS_TRUSTED(bp) && \
+ !((bp)->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS_TRUSTED_VF)) || \
+ (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)))
struct bnxt_vnic_info {
STAILQ_ENTRY(bnxt_vnic_info) next;
@@ -28,6 +46,7 @@ struct bnxt_vnic_info {
uint16_t mru;
uint16_t hash_type;
uint8_t hash_mode;
+ uint8_t prev_hash_mode;
const struct rte_memzone *rss_mz;
rte_iova_t rss_table_dma_addr;
uint16_t *rss_table;
@@ -50,11 +69,29 @@ struct bnxt_vnic_info {
bool func_default;
bool bd_stall;
bool rss_dflt_cr;
+ uint16_t ref_cnt;
+ uint64_t queue_bitmap[BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS];
STAILQ_HEAD(, bnxt_filter_info) filter;
STAILQ_HEAD(, rte_flow) flow_list;
};
+struct bnxt_vnic_queue_db {
+ uint16_t num_queues;
+ uint16_t dflt_vnic_id;
+ struct rte_hash *rss_q_db;
+};
+
+/* RSS structure to pass values as a structure argument */
+struct bnxt_vnic_rss_info {
+ uint32_t rss_level;
+ uint64_t rss_types;
+ uint32_t key_len; /**< Hash key length in bytes. */
+ const uint8_t *key; /**< Hash key. */
+ uint32_t queue_num; /**< Number of entries in @p queue_list. */
+ uint64_t queue_list[BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS];
+};
+
struct bnxt;
int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
int pool);
@@ -69,4 +106,39 @@ void bnxt_prandom_bytes(void *dest_ptr, size_t len);
uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type);
int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl);
uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode);
+
+int32_t bnxt_vnic_queue_db_init(struct bnxt *bp);
+int32_t bnxt_vnic_queue_db_deinit(struct bnxt *bp);
+
+void bnxt_vnic_queue_db_update_dlft_vnic(struct bnxt *bp);
+int32_t
+bnxt_vnic_rss_queue_status_update(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+
+int32_t bnxt_vnic_queue_action_alloc(struct bnxt *bp, uint16_t q_index,
+ uint16_t *vnic_idx,
+ uint16_t *vnicid);
+int32_t bnxt_vnic_queue_action_free(struct bnxt *bp, uint16_t q_index);
+
+int32_t bnxt_vnic_rss_action_alloc(struct bnxt *bp,
+ struct bnxt_vnic_rss_info *rss_info,
+ uint16_t *queue_id,
+ uint16_t *vnicid);
+int32_t bnxt_vnic_rss_action_free(struct bnxt *bp, uint16_t q_index);
+
+int32_t bnxt_vnic_reta_config_update(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic_info,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+int32_t bnxt_vnic_queue_id_is_valid(struct bnxt_vnic_info *vnic_info,
+ uint16_t queue_id);
+void bnxt_vnic_ring_grp_populate(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+void bnxt_vnic_rules_init(struct bnxt_vnic_info *vnic);
+int32_t bnxt_vnic_mru_config(struct bnxt *bp, uint16_t new_mtu);
+struct bnxt_vnic_info *bnxt_vnic_queue_db_get_vnic(struct bnxt *bp,
+ uint16_t vnic_idx);
+struct bnxt_vnic_info *
+bnxt_vnic_queue_id_get_next(struct bnxt *bp, uint16_t queue_id,
+ uint16_t *vnic_idx);
+void bnxt_vnic_tpa_cfg(struct bnxt *bp, uint16_t queue_id, bool flag);
+
#endif
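
For reference, the queue-bitmap sizing falls out of the maximum queue count:
256 queues need 256 / 8 = 32 bytes (the rte_hash key length used above) and
256 / 64 = 4 uint64_t words. The BNXT_VNIC_BITMAP_SET/GET macros themselves
are not part of this hunk, so the accessors below are only a plausible
sketch of their shape (the real definitions may differ, e.g. index from the
MSB instead):

	/* Hypothetical bit accessors, assuming LSB-first ordering. */
	#define VNIC_BITMAP_SET(bm, i)  ((bm)[(i) / 64] |= 1ULL << ((i) % 64))
	#define VNIC_BITMAP_GET(bm, i)  (((bm)[(i) / 64] >> ((i) % 64)) & 1ULL)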
diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build
index ead03a5ea3..c7a0d5f6c9 100644
--- a/drivers/net/bnxt/meson.build
+++ b/drivers/net/bnxt/meson.build
@@ -25,6 +25,8 @@ endforeach
headers = files('rte_pmd_bnxt.h')
+deps += ['hash']
+
sources = files(
'bnxt_cpr.c',
'bnxt_ethdev.c',
@@ -41,7 +43,6 @@ sources = files(
'bnxt_util.c',
'bnxt_vnic.c',
'bnxt_reps.c',
-
'rte_pmd_bnxt.c',
)
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c b/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c
index b09cccedf5..474854d59b 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021-2021 Broadcom
+ * Copyright(c) 2021-2023 Broadcom
* All rights reserved.
*/
@@ -17,6 +17,36 @@
#include "bnxt_tf_common.h"
#include "bnxt_tf_pmd_shim.h"
+int
+bnxt_tunnel_dst_port_free(struct bnxt *bp,
+ uint16_t port,
+ uint8_t type)
+{
+ return bnxt_hwrm_tunnel_dst_port_free(bp,
+ port,
+ type);
+}
+
+int
+bnxt_tunnel_dst_port_alloc(struct bnxt *bp,
+ uint16_t port,
+ uint8_t type)
+{
+ return bnxt_hwrm_tunnel_dst_port_alloc(bp,
+ port,
+ type);
+}
+
+int
+bnxt_tunnel_upar_id_get(struct bnxt *bp,
+ uint8_t type,
+ uint8_t *upar_id)
+{
+ return bnxt_hwrm_tunnel_upar_id_get(bp,
+ upar_id,
+ type);
+}
+
struct bnxt *
bnxt_pmd_get_bp(uint16_t port)
{
@@ -59,7 +89,7 @@ int32_t bnxt_rss_config_action_apply(struct bnxt_ulp_mapper_parms *parms)
BNXT_TF_DBG(ERR, "Invalid bp for port_id %u\n", parms->port_id);
return rc;
}
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
if (vnic == NULL) {
BNXT_TF_DBG(ERR, "default vnic not available for %u\n",
parms->port_id);
@@ -108,7 +138,6 @@ static int32_t glob_error_fn(const char *epath, int32_t eerrno)
return 0;
}
-
static int32_t ulp_pmd_get_mac_by_pci(const char *pci_name, uint8_t *mac)
{
char path[ULP_FILE_PATH_SIZE], dev_str[ULP_FILE_PATH_SIZE];
@@ -244,7 +273,7 @@ bnxt_pmd_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
bp = eth_dev->data->dev_private;
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
return vnic->fw_vnic_id;
}
@@ -343,7 +372,6 @@ bnxt_pmd_get_vport(uint16_t port_id)
return (1 << bnxt_pmd_get_phy_port_id(port_id));
}
-
int32_t
bnxt_pmd_set_unicast_rxmask(struct rte_eth_dev *eth_dev)
{
@@ -363,7 +391,7 @@ bnxt_pmd_set_unicast_rxmask(struct rte_eth_dev *eth_dev)
if (bp->vnic_info == NULL)
return 0;
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ vnic = bnxt_get_default_vnic(bp);
old_flags = vnic->flags;
vnic->flags |= BNXT_VNIC_INFO_UCAST;
@@ -376,3 +404,211 @@ bnxt_pmd_set_unicast_rxmask(struct rte_eth_dev *eth_dev)
return rc;
}
+
+int32_t bnxt_pmd_queue_action_create(struct bnxt_ulp_mapper_parms *parms,
+ uint16_t *vnic_idx, uint16_t *vnic_id)
+{
+ struct bnxt *bp = NULL;
+ uint16_t q_index;
+ struct ulp_rte_act_prop *ap = parms->act_prop;
+
+ bp = bnxt_pmd_get_bp(parms->port_id);
+ if (bp == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid bp for port_id %u\n", parms->port_id);
+ return -EINVAL;
+ }
+
+ memcpy(&q_index, &ap->act_details[BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX],
+ BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX);
+
+ return bnxt_vnic_queue_action_alloc(bp, q_index, vnic_idx, vnic_id);
+}
+
+int32_t bnxt_pmd_queue_action_delete(struct tf *tfp, uint16_t vnic_idx)
+{
+ struct bnxt *bp = NULL;
+
+ bp = tfp->bp;
+ if (bp == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid bp\n");
+ return -EINVAL;
+ }
+ return bnxt_vnic_queue_action_free(bp, vnic_idx);
+}
+
+int32_t bnxt_pmd_rss_action_create(struct bnxt_ulp_mapper_parms *parms,
+ uint16_t *vnic_idx, uint16_t *vnic_id)
+{
+ struct bnxt *bp = NULL;
+ struct bnxt_vnic_rss_info rss_info = {0};
+ struct ulp_rte_act_prop *ap = parms->act_prop;
+
+ bp = bnxt_pmd_get_bp(parms->port_id);
+ if (bp == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid bp for port_id %u\n", parms->port_id);
+ return -EINVAL;
+ }
+
+ /* get the details */
+ memset(&rss_info, 0, sizeof(rss_info));
+ memcpy(&rss_info.rss_types,
+ &ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES],
+ BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
+ memcpy(&rss_info.rss_level,
+ &ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL],
+ BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
+ memcpy(&rss_info.key_len,
+ &ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
+ BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
+ if (rss_info.key_len)
+ rss_info.key = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY];
+ memcpy(&rss_info.queue_num,
+ &ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM],
+ BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE_NUM);
+
+ /* Validate the size of the queue list */
+ if (sizeof(rss_info.queue_list) < BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE) {
+ BNXT_TF_DBG(ERR, "Mismatch of RSS queue size in template\n");
+ return -EINVAL;
+ }
+ memcpy(rss_info.queue_list,
+ &ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE],
+ BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE);
+
+ return bnxt_vnic_rss_action_alloc(bp, &rss_info, vnic_idx, vnic_id);
+}
+
+int32_t bnxt_pmd_rss_action_delete(struct tf *tfp, uint16_t vnic_idx)
+{
+ struct bnxt *bp = tfp->bp;
+
+ if (bp == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid bp\n");
+ return -EINVAL;
+ }
+ return bnxt_vnic_rss_action_free(bp, vnic_idx);
+}
+
+#define ULP_GLOBAL_TUNNEL_PORT_ID_SHIFT 16
+#define ULP_GLOBAL_TUNNEL_PORT_ID_MASK ((uint16_t)0xffff)
+#define ULP_GLOBAL_TUNNEL_UPARID_SHIFT 8
+#define ULP_GLOBAL_TUNNEL_UPARID_MASK ((uint16_t)0xff)
+#define ULP_GLOBAL_TUNNEL_TYPE_SHIFT 0
+#define ULP_GLOBAL_TUNNEL_TYPE_MASK ((uint16_t)0xffff)
+
+/* Extracts the dpdk port id and tunnel type from the handle */
+static void
+bnxt_pmd_global_reg_hndl_to_data(uint32_t handle, uint16_t *port,
+ uint8_t *upar_id, uint8_t *type)
+{
+ *type = (handle >> ULP_GLOBAL_TUNNEL_TYPE_SHIFT) &
+ ULP_GLOBAL_TUNNEL_TYPE_MASK;
+ *upar_id = (handle >> ULP_GLOBAL_TUNNEL_UPARID_SHIFT) &
+ ULP_GLOBAL_TUNNEL_UPARID_MASK;
+ *port = (handle >> ULP_GLOBAL_TUNNEL_PORT_ID_SHIFT) &
+ ULP_GLOBAL_TUNNEL_PORT_ID_MASK;
+}
+
+/* Packs the dpdk port id and tunnel type in the handle */
+static void
+bnxt_pmd_global_reg_data_to_hndl(uint16_t port_id, uint8_t upar_id,
+ uint8_t type, uint32_t *handle)
+{
+ *handle = (port_id & ULP_GLOBAL_TUNNEL_PORT_ID_MASK) <<
+ ULP_GLOBAL_TUNNEL_PORT_ID_SHIFT;
+ *handle |= (upar_id & ULP_GLOBAL_TUNNEL_UPARID_MASK) <<
+ ULP_GLOBAL_TUNNEL_UPARID_SHIFT;
+ *handle |= (type & ULP_GLOBAL_TUNNEL_TYPE_MASK) <<
+ ULP_GLOBAL_TUNNEL_TYPE_SHIFT;
+}
+
+static struct bnxt_global_tunnel_info
+ ulp_global_tunnel_db[BNXT_GLOBAL_REGISTER_TUNNEL_MAX] = {{0}};
+/* Sets or resets the tunnel ports.
+ * If udp_port == 0, then the port_id and type are retrieved from the handle;
+ * otherwise, the incoming port_id, type, and udp_port are used.
+ * The type is enum ulp_mapper_ulp_global_tunnel_type.
+ */
+int32_t
+bnxt_pmd_global_tunnel_set(uint16_t port_id, uint8_t type,
+ uint16_t udp_port, uint32_t *handle)
+{
+ uint16_t lport_id, ldport;
+ uint8_t hwtype, ltype, lupar_id;
+ struct bnxt *bp;
+ int32_t rc = 0;
+
+ /* convert to HWRM type */
+ switch (type) {
+ case BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN:
+ hwtype = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
+ break;
+ default:
+ BNXT_TF_DBG(ERR, "Tunnel Type (%d) invalid\n", type);
+ return -EINVAL;
+ }
+
+ if (!udp_port) {
+ /* Free based on the handle */
+ if (!handle) {
+ BNXT_TF_DBG(ERR, "Free with invalid handle\n");
+ return -EINVAL;
+ }
+ bnxt_pmd_global_reg_hndl_to_data(*handle, &lport_id,
+ &lupar_id, &ltype);
+
+ bp = bnxt_pmd_get_bp(lport_id);
+ if (!bp) {
+ BNXT_TF_DBG(ERR, "Unable to get dev by port %d\n",
+ lport_id);
+ return -EINVAL;
+ }
+
+ if (!ulp_global_tunnel_db[ltype].ref_cnt)
+ return 0;
+ ldport = ulp_global_tunnel_db[ltype].dport;
+ rc = bnxt_hwrm_tunnel_dst_port_free(bp, ldport, hwtype);
+ if (rc) {
+ BNXT_TF_DBG(ERR,
+ "Unable to free tunnel dst port (%d)\n",
+ ldport);
+ return rc;
+ }
+ ulp_global_tunnel_db[ltype].ref_cnt--;
+ if (ulp_global_tunnel_db[ltype].ref_cnt == 0)
+ ulp_global_tunnel_db[ltype].dport = 0;
+ } else {
+ bp = bnxt_pmd_get_bp(port_id);
+ if (!bp) {
+ BNXT_TF_DBG(ERR, "Unable to get dev by port %d\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_port, hwtype);
+ if (!rc) {
+ ulp_global_tunnel_db[type].ref_cnt++;
+ ulp_global_tunnel_db[type].dport = udp_port;
+ bnxt_pmd_global_reg_data_to_hndl(port_id, 0,
+ type, handle);
+ }
+ }
+ return rc;
+}
+
+#define BNXT_ULP_HOT_UP_DYNAMIC_ENV_VAR "BNXT_ULP_T_HA_SUPPORT"
+/* This function queries the environment variable to determine
+ * whether hot upgrade should be disabled or not.
+ * If BNXT_ULP_T_HA_SUPPORT is explicitly set to zero then
+ * hot upgrade is disabled.
+ */
+int32_t bnxt_pmd_get_hot_upgrade_env(void)
+{
+ char *env;
+ int32_t hot_up = 1;
+
+ env = getenv(BNXT_ULP_HOT_UP_DYNAMIC_ENV_VAR);
+ if (env && strcmp(env, "0") == 0)
+ hot_up = 0;
+ return hot_up;
+}
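
A worked example of the tunnel-handle layout, derived from the shift/mask
macros above (illustrative only; bnxt_pmd_global_reg_data_to_hndl is static,
so such a call would live in the same file):

	uint32_t handle;

	/* port_id 2, upar_id 0, type BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN (1) */
	bnxt_pmd_global_reg_data_to_hndl(2, 0, 1, &handle);
	/* handle == 0x00020001: port id in bits 31:16, upar id in bits 15:8,
	 * tunnel type in the low bits.
	 */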
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.h b/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.h
index d6d7a1f0af..b76e4b849d 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021-2021 Broadcom
+ * Copyright(c) 2021-2023 Broadcom
* All rights reserved.
*/
@@ -9,6 +9,19 @@
#include "bnxt_tf_common.h"
#include "ulp_mapper.h"
+/* Simple structure to manage the custom global tunnel */
+struct bnxt_global_tunnel_info {
+ uint16_t dport;
+ uint16_t ref_cnt;
+};
+
+/* Internal tunnel type */
+enum bnxt_global_register_tunnel_type {
+ BNXT_GLOBAL_REGISTER_TUNNEL_UNUSED = 0,
+ BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN,
+ BNXT_GLOBAL_REGISTER_TUNNEL_MAX
+};
+
int32_t bnxt_rss_config_action_apply(struct bnxt_ulp_mapper_parms *parms);
int32_t bnxt_pmd_get_parent_mac_addr(struct bnxt_ulp_mapper_parms *parms,
uint8_t *mac);
@@ -25,4 +38,24 @@ uint16_t bnxt_pmd_get_phy_port_id(uint16_t port);
uint16_t bnxt_pmd_get_vport(uint16_t port);
enum bnxt_ulp_intf_type bnxt_pmd_get_interface_type(uint16_t port);
int32_t bnxt_pmd_set_unicast_rxmask(struct rte_eth_dev *eth_dev);
+int32_t bnxt_pmd_queue_action_create(struct bnxt_ulp_mapper_parms *parms,
+ uint16_t *vnic_idx, uint16_t *vnic_id);
+int32_t bnxt_pmd_queue_action_delete(struct tf *tfp, uint16_t vnic_idx);
+int32_t bnxt_pmd_rss_action_create(struct bnxt_ulp_mapper_parms *parms,
+ uint16_t *vnic_idx, uint16_t *vnic_id);
+int32_t bnxt_pmd_rss_action_delete(struct tf *tfp, uint16_t vnic_idx);
+int32_t bnxt_tunnel_dst_port_free(struct bnxt *bp,
+ uint16_t port,
+ uint8_t type);
+int32_t bnxt_tunnel_dst_port_alloc(struct bnxt *bp,
+ uint16_t port,
+ uint8_t type);
+int32_t
+bnxt_pmd_global_tunnel_set(uint16_t port_id, uint8_t type,
+ uint16_t udp_port, uint32_t *handle);
+int32_t
+bnxt_tunnel_upar_id_get(struct bnxt *bp,
+ uint8_t type,
+ uint8_t *upar_id);
+int32_t bnxt_pmd_get_hot_upgrade_env(void);
#endif /* _BNXT_TF_PMD_ABSTRACT_H_ */
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 109bd0652a..08eb0c6063 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -78,7 +78,7 @@ bnxt_ulp_devid_get(struct bnxt *bp,
if (BNXT_STINGRAY(bp))
*ulp_dev_id = BNXT_ULP_DEVICE_ID_STINGRAY;
else
- /* Assuming Whitney */
+ /* Assuming P4 */
*ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
return 0;
@@ -340,12 +340,62 @@ bnxt_ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx,
return rc;
}
+/* Function to set the hot upgrade support into the context */
+static int
+bnxt_ulp_multi_shared_session_support_set(struct bnxt *bp,
+ enum bnxt_ulp_device_id devid,
+ uint32_t fw_hu_update)
+{
+ struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx;
+ struct tf_get_version_parms v_params = { 0 };
+ struct tf *tfp;
+ int32_t rc = 0;
+ int32_t new_fw = 0;
+
+ v_params.device_type = bnxt_ulp_cntxt_convert_dev_id(devid);
+ v_params.bp = bp;
+
+ tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_DEFAULT);
+ rc = tf_get_version(tfp, &v_params);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get tf version.\n");
+ return rc;
+ }
+
+ if (v_params.major == 1 && v_params.minor == 0 &&
+ v_params.update == 1) {
+ new_fw = 1;
+ }
+ /* if the version update is greater than 0 then set support for
+ * multiple versions
+ */
+ if (new_fw) {
+ ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_MULTI_SHARED_SUPPORT;
+ ulp_ctx->cfg_data->hu_session_type =
+ BNXT_ULP_SESSION_TYPE_SHARED;
+ }
+ if (!new_fw && fw_hu_update) {
+ ulp_ctx->cfg_data->ulp_flags &= ~BNXT_ULP_HIGH_AVAIL_ENABLED;
+ ulp_ctx->cfg_data->hu_session_type =
+ BNXT_ULP_SESSION_TYPE_SHARED |
+ BNXT_ULP_SESSION_TYPE_SHARED_OWC;
+ }
+
+ if (!new_fw && !fw_hu_update) {
+ ulp_ctx->cfg_data->hu_session_type =
+ BNXT_ULP_SESSION_TYPE_SHARED |
+ BNXT_ULP_SESSION_TYPE_SHARED_OWC;
+ }
+
+ return rc;
+}
+
int32_t
bnxt_ulp_cntxt_app_caps_init(struct bnxt *bp,
uint8_t app_id, uint32_t dev_id)
{
struct bnxt_ulp_app_capabilities_info *info;
- uint32_t num = 0;
+ uint32_t num = 0, fw = 0;
uint16_t i;
bool found = false;
struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx;
@@ -375,15 +425,49 @@ bnxt_ulp_cntxt_app_caps_init(struct bnxt *bp,
if (info[i].flags & BNXT_ULP_APP_CAP_UNICAST_ONLY)
ulp_ctx->cfg_data->ulp_flags |=
BNXT_ULP_APP_UNICAST_ONLY;
+ if (info[i].flags & BNXT_ULP_APP_CAP_IP_TOS_PROTO_SUPPORT)
+ ulp_ctx->cfg_data->ulp_flags |=
+ BNXT_ULP_APP_TOS_PROTO_SUPPORT;
+ if (info[i].flags & BNXT_ULP_APP_CAP_BC_MC_SUPPORT)
+ ulp_ctx->cfg_data->ulp_flags |=
+ BNXT_ULP_APP_BC_MC_SUPPORT;
if (info[i].flags & BNXT_ULP_APP_CAP_SOCKET_DIRECT) {
/* Enable socket direction only if MR is enabled in fw*/
if (BNXT_MULTIROOT_EN(bp)) {
ulp_ctx->cfg_data->ulp_flags |=
BNXT_ULP_APP_SOCKET_DIRECT;
- BNXT_TF_DBG(DEBUG,
- "Socket Direct feature is enabled");
+ BNXT_TF_DBG(INFO,
+ "Socket Direct feature is enabled\n");
}
}
+ if (info[i].flags & BNXT_ULP_APP_CAP_HA_DYNAMIC) {
+ /* Read the environment variable to determine hot up */
+ if (!bnxt_pmd_get_hot_upgrade_env()) {
+ ulp_ctx->cfg_data->ulp_flags |=
+ BNXT_ULP_APP_HA_DYNAMIC;
+ /* reset Hot upgrade, dynamically disabled */
+ ulp_ctx->cfg_data->ulp_flags &=
+ ~BNXT_ULP_HIGH_AVAIL_ENABLED;
+ ulp_ctx->cfg_data->def_session_type =
+ BNXT_ULP_SESSION_TYPE_DEFAULT_NON_HA;
+ BNXT_TF_DBG(INFO, "Hot upgrade disabled.\n");
+ }
+ }
+
+ bnxt_ulp_vxlan_ip_port_set(ulp_ctx, info[i].vxlan_ip_port);
+ bnxt_ulp_vxlan_port_set(ulp_ctx, info[i].vxlan_port);
+
+ /* set the shared session support from firmware */
+ fw = info[i].upgrade_fw_update;
+ if (ULP_HIGH_AVAIL_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags) &&
+ bnxt_ulp_multi_shared_session_support_set(bp, dev_id, fw)) {
+ BNXT_TF_DBG(ERR,
+ "Unable to get shared session support\n");
+ return -EINVAL;
+ }
+ bnxt_ulp_ha_reg_set(ulp_ctx, info[i].ha_reg_state,
+ info[i].ha_reg_cnt);
+ ulp_ctx->cfg_data->ha_pool_id = info[i].ha_pool_id;
}
if (!found) {
BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
@@ -1027,6 +1111,11 @@ ulp_ctx_init(struct bnxt *bp,
goto error_deinit;
}
+ if (BNXT_TESTPMD_EN(bp)) {
+ ulp_data->ulp_flags &= ~BNXT_ULP_VF_REP_ENABLED;
+ BNXT_TF_DBG(ERR, "Enabled Testpmd forward mode\n");
+ }
+
/*
* Shared session must be created before first regular session but after
* the ulp_ctx is valid.
@@ -1055,7 +1144,6 @@ ulp_ctx_init(struct bnxt *bp,
}
bnxt_ulp_cntxt_num_shared_clients_set(bp->ulp_ctx, true);
-
/* Open the ulp session. */
rc = ulp_ctx_session_open(bp, session);
if (rc)
@@ -1181,7 +1269,7 @@ ulp_ctx_attach(struct bnxt *bp,
tfp->session = NULL;
return rc;
}
-
+ tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_DEFAULT);
bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT, tfp);
return rc;
}
@@ -1427,7 +1515,8 @@ bnxt_ulp_deinit(struct bnxt *bp,
return;
ha_enabled = bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx);
- if (ha_enabled && session->session_opened) {
+ if (ha_enabled &&
+ bnxt_ulp_session_is_open(session, BNXT_ULP_SESSION_TYPE_DEFAULT)) {
int32_t rc = ulp_ha_mgr_close(bp->ulp_ctx);
if (rc)
BNXT_TF_DBG(ERR, "Failed to close HA (%d)\n", rc);
@@ -1490,6 +1579,7 @@ bnxt_ulp_init(struct bnxt *bp,
struct bnxt_ulp_session_state *session)
{
int rc;
+ uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
/* Allocate and Initialize the ulp context. */
rc = ulp_ctx_init(bp, session);
@@ -1584,6 +1674,13 @@ bnxt_ulp_init(struct bnxt *bp,
goto jump_to_error;
}
}
+
+ rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
+ return rc;
+ }
+
BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
return rc;
@@ -1592,6 +1689,30 @@ bnxt_ulp_init(struct bnxt *bp,
return rc;
}
+static int
+ulp_cust_vxlan_alloc(struct bnxt *bp)
+{
+ int rc = 0;
+
+ if (ULP_APP_CUST_VXLAN_SUPPORT(bp->ulp_ctx)) {
+ rc = bnxt_tunnel_dst_port_alloc(bp,
+ bp->ulp_ctx->cfg_data->vxlan_port,
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
+ if (rc)
+ BNXT_TF_DBG(ERR, "Failed to set global vxlan port\n");
+ }
+
+ if (ULP_APP_CUST_VXLAN_IP_SUPPORT(bp->ulp_ctx)) {
+ rc = bnxt_tunnel_dst_port_alloc(bp,
+ bp->ulp_ctx->cfg_data->vxlan_ip_port,
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4);
+ if (rc)
+ BNXT_TF_DBG(ERR, "Failed to set global custom vxlan_ip port\n");
+ }
+
+ return rc;
+}
+
/*
* When a port is initialized by dpdk. This functions sets up
* the port specific details.
@@ -1686,6 +1807,7 @@ bnxt_ulp_port_init(struct bnxt *bp)
BNXT_TF_DBG(ERR, "Failed to update port database\n");
goto jump_to_error;
}
+
/* create the default rules */
rc = bnxt_ulp_create_df_rules(bp);
if (rc) {
@@ -1711,6 +1833,10 @@ bnxt_ulp_port_init(struct bnxt *bp)
}
}
+ rc = ulp_cust_vxlan_alloc(bp);
+ if (rc)
+ goto jump_to_error;
+
return rc;
jump_to_error:
@@ -1718,6 +1844,28 @@ bnxt_ulp_port_init(struct bnxt *bp)
return rc;
}
+static void
+ulp_cust_vxlan_free(struct bnxt *bp)
+{
+ int rc;
+
+ if (ULP_APP_CUST_VXLAN_SUPPORT(bp->ulp_ctx)) {
+ rc = bnxt_tunnel_dst_port_free(bp,
+ bp->ulp_ctx->cfg_data->vxlan_port,
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
+ if (rc)
+ BNXT_TF_DBG(ERR, "Failed to clear global vxlan port\n");
+ }
+
+ if (ULP_APP_CUST_VXLAN_IP_SUPPORT(bp->ulp_ctx)) {
+ rc = bnxt_tunnel_dst_port_free(bp,
+ bp->ulp_ctx->cfg_data->vxlan_ip_port,
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4);
+ if (rc)
+ BNXT_TF_DBG(ERR, "Failed to clear global custom vxlan port\n");
+ }
+}
+
/*
* When a port is de-initialized by dpdk. This functions clears up
* the port specific details.
@@ -1770,6 +1918,9 @@ bnxt_ulp_port_deinit(struct bnxt *bp)
if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
bp->ulp_ctx->cfg_data->ref_cnt--;
if (bp->ulp_ctx->cfg_data->ref_cnt) {
+ /* Free tunnel configurations */
+ ulp_cust_vxlan_free(bp);
+
/* free the port details */
/* Free the default flow rule associated to this port */
bnxt_ulp_destroy_df_rules(bp, false);
@@ -2201,6 +2352,45 @@ bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
}
+/* Function to extract the action type from the shared action handle. */
+int32_t
+bnxt_get_action_handle_type(const struct rte_flow_action_handle *handle,
+ uint32_t *action_handle_type)
+{
+ if (!action_handle_type)
+ return -EINVAL;
+
+ *action_handle_type = (uint32_t)(((uint64_t)handle >> 32) & 0xffffffff);
+ if (*action_handle_type >= BNXT_ULP_GEN_TBL_MAX_SZ)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Function to extract the direction from the shared action handle. */
+int32_t
+bnxt_get_action_handle_direction(const struct rte_flow_action_handle *handle,
+ uint32_t *dir)
+{
+ uint32_t shared_type;
+ int32_t ret = 0;
+
+ ret = bnxt_get_action_handle_type(handle, &shared_type);
+ if (ret)
+ return ret;
+
+ *dir = shared_type & 0x1 ? BNXT_ULP_DIR_EGRESS : BNXT_ULP_DIR_INGRESS;
+
+ return ret;
+}
+
+/* Function to extract the action index from the shared action handle. */
+uint32_t
+bnxt_get_action_handle_index(const struct rte_flow_action_handle *handle)
+{
+ return (uint32_t)((uint64_t)handle & 0xffffffff);
+}
+
/* Function to set the ha info into the context */
int32_t
bnxt_ulp_cntxt_ptr2_ha_info_set(struct bnxt_ulp_context *ulp_ctx,
@@ -2306,6 +2496,13 @@ bnxt_ulp_cntxt_ptr2_app_tun_list_get(struct bnxt_ulp_context *ulp)
return ulp->cfg_data->app_tun;
}
+/* Function to get the truflow app id. This is defined in the build file */
+uint32_t
+bnxt_ulp_default_app_id_get(void)
+{
+ return BNXT_TF_APP_ID;
+}
+
/* Function to convert ulp dev id to regular dev id. */
uint32_t
bnxt_ulp_cntxt_convert_dev_id(uint32_t ulp_dev_id)
@@ -2329,6 +2526,53 @@ bnxt_ulp_cntxt_convert_dev_id(uint32_t ulp_dev_id)
return type;
}
+/* This function sets the IF table index for the
+ * Application to poll to get the hot upgrade state and count details from
+ * the firmware.
+ */
+int32_t
+bnxt_ulp_ha_reg_set(struct bnxt_ulp_context *ulp_ctx,
+ uint8_t state, uint8_t cnt)
+{
+ if (!ulp_ctx || !ulp_ctx->cfg_data)
+ return -EINVAL;
+
+ if (ULP_MULTI_SHARED_IS_SUPPORTED(ulp_ctx)) {
+ ulp_ctx->cfg_data->hu_reg_state = state;
+ ulp_ctx->cfg_data->hu_reg_cnt = cnt;
+ } else {
+ ulp_ctx->cfg_data->hu_reg_state = ULP_HA_IF_TBL_IDX;
+ ulp_ctx->cfg_data->hu_reg_cnt = ULP_HA_CLIENT_CNT_IF_TBL_IDX;
+ }
+ return 0;
+}
+
+/* This function gets the IF table index for the
+ * application to poll to get the application hot upgrade state from
+ * the firmware.
+ */
+uint32_t
+bnxt_ulp_ha_reg_state_get(struct bnxt_ulp_context *ulp_ctx)
+{
+ if (!ulp_ctx || !ulp_ctx->cfg_data)
+ return 0;
+
+ return (uint32_t)ulp_ctx->cfg_data->hu_reg_state;
+}
+
+/* This function gets the IF table index for the
+ * Application to poll to get the application count from
+ * the firmware.
+ */
+uint32_t
+bnxt_ulp_ha_reg_cnt_get(struct bnxt_ulp_context *ulp_ctx)
+{
+ if (!ulp_ctx || !ulp_ctx->cfg_data)
+ return 0;
+
+ return (uint32_t)ulp_ctx->cfg_data->hu_reg_cnt;
+}
+
struct tf*
bnxt_ulp_bp_tfp_get(struct bnxt *bp, enum bnxt_ulp_session_type type)
{
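
As a worked example of the encoding that the three handle helpers above
decode (illustrative; the values are chosen arbitrarily): a 64-bit handle
with action type 5 and index 42 would behave as follows:

	const struct rte_flow_action_handle *h =
		(void *)(uintptr_t)(((uint64_t)5 << 32) | 42);
	uint32_t type, dir;

	bnxt_get_action_handle_type(h, &type);      /* type == 5 */
	bnxt_get_action_handle_direction(h, &dir);  /* bit 0 set => egress */
	/* bnxt_get_action_handle_index(h) == 42 */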
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index 9b30851b13..53d76e1465 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -386,4 +386,5 @@ bnxt_ulp_ha_reg_cnt_get(struct bnxt_ulp_context *ulp_ctx);
struct tf*
bnxt_ulp_bp_tfp_get(struct bnxt *bp, enum bnxt_ulp_session_type type);
+
#endif /* _BNXT_ULP_H_ */
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
index 55885d1b8c..ad04644db4 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -14,6 +14,8 @@
#include "ulp_ha_mgr.h"
#include "ulp_tun.h"
#include <rte_malloc.h>
+#include "ulp_template_db_tbl.h"
+#include "tfp.h"
static int32_t
bnxt_ulp_flow_validate_args(const struct rte_flow_attr *attr,
@@ -78,6 +80,17 @@ bnxt_ulp_set_dir_attributes(struct ulp_rte_parser_params *params,
#endif
}
+static inline void
+bnxt_ulp_init_parser_cf_defaults(struct ulp_rte_parser_params *params,
+ uint16_t port_id)
+{
+ /* Set up defaults for Comp field */
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_INCOMING_IF, port_id);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DEV_PORT_ID, port_id);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
+ BNXT_ULP_INVALID_SVIF_VAL);
+}
+
void
bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_create_parms *mapper_cparms,
struct ulp_rte_parser_params *params,
@@ -130,6 +143,10 @@ bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_create_parms *mapper_cparms,
ULP_COMP_FLD_IDX_WR(params,
BNXT_ULP_CF_IDX_WC_IS_HA_HIGH_REG,
1);
+ } else {
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_HA_SUPPORT_DISABLED,
+ 1);
}
/* Update the socket direct flag */
@@ -197,13 +214,7 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev,
/* Set the flow attributes */
bnxt_ulp_set_dir_attributes(&params, attr);
- /* copy the device port id and direction for further processing */
- ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_INCOMING_IF,
- dev->data->port_id);
- ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_DEV_PORT_ID,
- dev->data->port_id);
- ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_SVIF_FLAG,
- BNXT_ULP_INVALID_SVIF_VAL);
+ bnxt_ulp_init_parser_cf_defaults(&params, dev->data->port_id);
/* Get the function id */
if (ulp_port_db_port_func_id_get(ulp_ctx,
@@ -320,6 +331,7 @@ bnxt_ulp_flow_validate(struct rte_eth_dev *dev,
/* Set the flow attributes */
bnxt_ulp_set_dir_attributes(&params, attr);
+ bnxt_ulp_init_parser_cf_defaults(&params, dev->data->port_id);
/* Parse the rte flow pattern */
ret = bnxt_ulp_rte_parser_hdr_parse(pattern, &params);
@@ -494,6 +506,256 @@ bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
return rc;
}
+static int32_t
+bnxt_ulp_action_handle_chk_args(const struct rte_flow_action *action,
+ const struct rte_flow_indir_action_conf *conf)
+{
+ if (!action || !conf)
+ return BNXT_TF_RC_ERROR;
+ /* shared action only allowed to have one direction */
+ if (conf->ingress == 1 && conf->egress == 1)
+ return BNXT_TF_RC_ERROR;
+ /* shared action must have at least one direction */
+ if (conf->ingress == 0 && conf->egress == 0)
+ return BNXT_TF_RC_ERROR;
+ return BNXT_TF_RC_SUCCESS;
+}
+
+static inline void
+bnxt_ulp_set_action_handle_dir_attr(struct ulp_rte_parser_params *params,
+ const struct rte_flow_indir_action_conf *conf)
+{
+ if (conf->ingress == 1)
+ params->dir_attr |= BNXT_ULP_FLOW_ATTR_INGRESS;
+ else if (conf->egress == 1)
+ params->dir_attr |= BNXT_ULP_FLOW_ATTR_EGRESS;
+}
+
+static struct rte_flow_action_handle *
+bnxt_ulp_action_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ enum bnxt_ulp_intf_type port_type = BNXT_ULP_INTF_TYPE_INVALID;
+ struct bnxt_ulp_mapper_create_parms mparms = { 0 };
+ struct ulp_rte_parser_params params;
+ struct bnxt_ulp_context *ulp_ctx;
+ uint32_t act_tid;
+ uint16_t func_id;
+ uint32_t ifindex;
+ int ret = BNXT_TF_RC_ERROR;
+ const struct rte_flow_action actions[2] = {
+ {
+ .type = action->type,
+ .conf = action->conf
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END
+ }
+ };
+
+ if (bnxt_ulp_action_handle_chk_args(action, conf) != BNXT_TF_RC_SUCCESS)
+ goto parse_error;
+
+ ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
+ if (!ulp_ctx) {
+ BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
+ goto parse_error;
+ }
+
+ /* Initialize the parser params */
+ memset(&params, 0, sizeof(struct ulp_rte_parser_params));
+ params.ulp_ctx = ulp_ctx;
+
+ ULP_BITMAP_SET(params.act_bitmap.bits, BNXT_ULP_ACT_BIT_SHARED);
+
+ /* Set the shared action direction attribute */
+ bnxt_ulp_set_action_handle_dir_attr(&params, conf);
+
+ /* perform the conversion from dpdk port to bnxt ifindex */
+ if (ulp_port_db_dev_port_to_ulp_index(ulp_ctx,
+ dev->data->port_id,
+ &ifindex)) {
+ BNXT_TF_DBG(ERR, "Port id is not valid\n");
+ goto parse_error;
+ }
+ port_type = ulp_port_db_port_type_get(ulp_ctx, ifindex);
+ if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
+ BNXT_TF_DBG(ERR, "Port type is not valid\n");
+ goto parse_error;
+ }
+
+ bnxt_ulp_init_parser_cf_defaults(&params, dev->data->port_id);
+
+ /* Emulating the match port for direction processing */
+ ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
+ port_type);
+
+ if ((params.dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
+ port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
+ ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_DIRECTION,
+ BNXT_ULP_DIR_EGRESS);
+ } else {
+ /* Assign the input direction */
+ if (params.dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
+ ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_DIRECTION,
+ BNXT_ULP_DIR_INGRESS);
+ else
+ ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_DIRECTION,
+ BNXT_ULP_DIR_EGRESS);
+ }
+
+ /* Parse the shared action */
+ ret = bnxt_ulp_rte_parser_act_parse(actions, &params);
+ if (ret != BNXT_TF_RC_SUCCESS)
+ goto parse_error;
+
+ /* Perform the rte flow post process */
+ bnxt_ulp_rte_parser_post_process(&params);
+
+ /* do the tunnel offload process if any */
+ ret = ulp_tunnel_offload_process(&params);
+ if (ret == BNXT_TF_RC_ERROR)
+ goto parse_error;
+
+ ret = ulp_matcher_action_match(&params, &act_tid);
+ if (ret != BNXT_TF_RC_SUCCESS)
+ goto parse_error;
+
+ bnxt_ulp_init_mapper_params(&mparms, &params,
+ BNXT_ULP_FDB_TYPE_REGULAR);
+ mparms.act_tid = act_tid;
+
+ /* Get the function id */
+ if (ulp_port_db_port_func_id_get(ulp_ctx,
+ dev->data->port_id,
+ &func_id)) {
+ BNXT_TF_DBG(ERR, "conversion of port to func id failed\n");
+ goto parse_error;
+ }
+
+ /* Protect flow creation */
+ if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) {
+ BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n");
+ goto parse_error;
+ }
+
+ ret = ulp_mapper_flow_create(params.ulp_ctx, &mparms);
+ bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
+
+ if (ret)
+ goto parse_error;
+
+ return (struct rte_flow_action_handle *)((uintptr_t)mparms.shared_hndl);
+
+parse_error:
+ rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create shared action.");
+ return NULL;
+}
+
+static int
+bnxt_ulp_action_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_handle *shared_hndl,
+ struct rte_flow_error *error)
+{
+ struct bnxt_ulp_mapper_create_parms mparms = { 0 };
+ struct bnxt_ulp_shared_act_info *act_info;
+ struct ulp_rte_parser_params params;
+ struct ulp_rte_act_prop *act_prop;
+ struct bnxt_ulp_context *ulp_ctx;
+ enum bnxt_ulp_direction_type dir;
+ uint32_t act_tid, act_info_entries;
+ int ret = BNXT_TF_RC_ERROR;
+ uint32_t shared_action_type;
+ uint64_t tmp64;
+
+ ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
+ if (!ulp_ctx) {
+ BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
+ goto parse_error;
+ }
+
+ if (!shared_hndl) {
+ BNXT_TF_DBG(ERR, "Invalid argument of shared handle\n");
+ goto parse_error;
+ }
+
+ act_prop = &params.act_prop;
+ memset(&params, 0, sizeof(struct ulp_rte_parser_params));
+ params.ulp_ctx = ulp_ctx;
+
+ if (bnxt_ulp_cntxt_app_id_get(ulp_ctx, &params.app_id)) {
+ BNXT_TF_DBG(ERR, "failed to get the app id\n");
+ goto parse_error;
+ }
+ /* The template will delete the entry if there are no references */
+ if (bnxt_get_action_handle_type(shared_hndl, &shared_action_type)) {
+ BNXT_TF_DBG(ERR, "Invalid shared handle\n");
+ goto parse_error;
+ }
+
+ act_info_entries = 0;
+ act_info = bnxt_ulp_shared_act_info_get(&act_info_entries);
+ if (shared_action_type >= act_info_entries || !act_info) {
+ BNXT_TF_DBG(ERR, "Invalid shared handle\n");
+ goto parse_error;
+ }
+
+ ULP_BITMAP_SET(params.act_bitmap.bits,
+ act_info[shared_action_type].act_bitmask);
+ ULP_BITMAP_SET(params.act_bitmap.bits, BNXT_ULP_ACT_BIT_DELETE);
+
+ ret = bnxt_get_action_handle_direction(shared_hndl, &dir);
+ if (ret) {
+ BNXT_TF_DBG(ERR, "Invalid shared handle dir\n");
+ goto parse_error;
+ }
+
+ if (dir == BNXT_ULP_DIR_EGRESS) {
+ params.dir_attr = BNXT_ULP_FLOW_ATTR_EGRESS;
+ ULP_BITMAP_SET(params.act_bitmap.bits,
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR);
+ } else {
+ params.dir_attr = BNXT_ULP_FLOW_ATTR_INGRESS;
+ ULP_BITMAP_SET(params.act_bitmap.bits,
+ BNXT_ULP_FLOW_DIR_BITMASK_ING);
+ }
+
+ tmp64 = tfp_cpu_to_be_64((uint64_t)
+ bnxt_get_action_handle_index(shared_hndl));
+
+ memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE],
+ &tmp64, BNXT_ULP_ACT_PROP_SZ_SHARED_HANDLE);
+
+ ret = ulp_matcher_action_match(&params, &act_tid);
+ if (ret != BNXT_TF_RC_SUCCESS)
+ goto parse_error;
+
+ bnxt_ulp_init_mapper_params(&mparms, &params,
+ BNXT_ULP_FDB_TYPE_REGULAR);
+ mparms.act_tid = act_tid;
+
+ if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) {
+ BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n");
+ goto parse_error;
+ }
+
+ ret = ulp_mapper_flow_create(ulp_ctx, &mparms);
+ bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
+ if (ret)
+ goto parse_error;
+
+ return 0;
+
+parse_error:
+ rte_flow_error_set(error, BNXT_TF_RC_ERROR,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy shared action.");
+ return -EINVAL;
+}
+
/* Tunnel offload Apis */
#define BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS 1
@@ -685,6 +947,8 @@ const struct rte_flow_ops bnxt_ulp_rte_flow_ops = {
.flush = bnxt_ulp_flow_flush,
.query = bnxt_ulp_flow_query,
.isolate = NULL,
+ .action_handle_create = bnxt_ulp_action_handle_create,
+ .action_handle_destroy = bnxt_ulp_action_handle_destroy,
/* Tunnel offload callbacks */
.tunnel_decap_set = bnxt_ulp_tunnel_decap_set,
.tunnel_match = bnxt_ulp_tunnel_match,
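
For context, applications reach these new callbacks through the generic
rte_flow indirect-action API, roughly as below (sketch only; port_id and the
RSS action configuration rss_conf are assumed to be set up by the caller):

	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss_conf,	/* struct rte_flow_action_rss (assumed) */
	};
	struct rte_flow_error err;
	struct rte_flow_action_handle *handle;

	handle = rte_flow_action_handle_create(port_id, &conf, &action, &err);
	/* ... reference the handle from flow rules ... */
	rte_flow_action_handle_destroy(port_id, handle, &err);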
diff --git a/drivers/net/bnxt/tf_ulp/meson.build b/drivers/net/bnxt/tf_ulp/meson.build
index 71094b9974..c7df7e42f1 100644
--- a/drivers/net/bnxt/tf_ulp/meson.build
+++ b/drivers/net/bnxt/tf_ulp/meson.build
@@ -1,28 +1,29 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Intel Corporation
-# Copyright(c) 2021 Broadcom
+# Copyright(c) 2023 Broadcom
#Include the folder for headers
includes += include_directories('.')
+cflags += '-DBNXT_TF_APP_ID=0'
#Add the source files
sources += files(
+ 'bnxt_tf_pmd_shim.c',
'bnxt_ulp.c',
- 'ulp_mark_mgr.c',
- 'ulp_flow_db.c',
- 'ulp_utils.c',
- 'ulp_mapper.c',
- 'ulp_matcher.c',
- 'ulp_rte_parser.c',
'bnxt_ulp_flow.c',
- 'ulp_port_db.c',
'ulp_def_rules.c',
'ulp_fc_mgr.c',
- 'ulp_tun.c',
- 'bnxt_tf_pmd_shim.c',
- 'ulp_gen_tbl.c',
+ 'ulp_flow_db.c',
'ulp_gen_hash.c',
+ 'ulp_gen_tbl.c',
'ulp_ha_mgr.c',
- 'ulp_rte_handler_tbl.c')
+ 'ulp_mapper.c',
+ 'ulp_mark_mgr.c',
+ 'ulp_matcher.c',
+ 'ulp_port_db.c',
+ 'ulp_rte_handler_tbl.c',
+ 'ulp_rte_parser.c',
+ 'ulp_tun.c',
+ 'ulp_utils.c')
subdir('generic_templates')
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
index dee2c04b24..c39cde39aa 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
@@ -438,8 +438,8 @@ void
ulp_fc_mgr_alarm_cb(void *arg)
{
int rc = 0;
- unsigned int j = 0;
- enum tf_dir i = 0;
+ unsigned int j;
+ enum tf_dir i;
struct bnxt_ulp_context *ctxt;
struct bnxt_ulp_fc_info *ulp_fc_info;
struct bnxt_ulp_device_params *dparms;
@@ -473,14 +473,6 @@ ulp_fc_mgr_alarm_cb(void *arg)
return;
}
- tfp = bnxt_ulp_cntxt_tfp_get(ctxt,
- ulp_fc_info->sw_acc_tbl[i][j].session_type);
- if (!tfp) {
- BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
- bnxt_ulp_cntxt_entry_release();
- return;
- }
-
/*
* Take the fc_lock to ensure no flow is destroyed
* during the bulk get
@@ -667,6 +659,7 @@ int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
+ ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].session_type = 0;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = 0;
diff --git a/drivers/net/bnxt/tf_ulp/ulp_gen_tbl.c b/drivers/net/bnxt/tf_ulp/ulp_gen_tbl.c
index 5279beb764..ebf32d6702 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_gen_tbl.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_gen_tbl.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -259,23 +259,26 @@ ulp_mapper_gen_tbl_entry_free(struct bnxt_ulp_context *ulp_ctx,
uint32_t tbl_idx, uint32_t ckey)
{
struct ulp_flow_db_res_params res;
+ uint32_t fid = 0; /* not used for this case */
res.direction = tbl_idx & 0x1;
res.resource_sub_type = tbl_idx >> 1;
res.resource_hndl = ckey;
- return ulp_mapper_gen_tbl_res_free(ulp_ctx, &res);
+ return ulp_mapper_gen_tbl_res_free(ulp_ctx, fid, &res);
}
/* Free the generic table list resource
*
* ulp_ctx [in] - Pointer to the ulp context
+ * fid [in] - The fid the generic table is associated with
* res [in] - Pointer to flow db resource entry
*
* returns 0 on success
*/
int32_t
ulp_mapper_gen_tbl_res_free(struct bnxt_ulp_context *ulp_ctx,
+ uint32_t fid,
struct ulp_flow_db_res_params *res)
{
struct bnxt_ulp_mapper_data *mapper_data;
@@ -283,7 +286,7 @@ ulp_mapper_gen_tbl_res_free(struct bnxt_ulp_context *ulp_ctx,
struct ulp_mapper_gen_tbl_entry entry;
struct ulp_gen_hash_entry_params hash_entry;
int32_t tbl_idx;
- uint32_t fid = 0;
+ uint32_t rid = 0;
uint32_t key_idx;
/* Extract the resource sub type and direction */
@@ -326,9 +329,10 @@ ulp_mapper_gen_tbl_res_free(struct bnxt_ulp_context *ulp_ctx,
/* Decrement the reference count */
if (!ULP_GEN_TBL_REF_CNT(&entry)) {
- BNXT_TF_DBG(ERR, "generic table corrupt %x:%" PRIX64 "\n",
+ BNXT_TF_DBG(DEBUG,
+ "generic table entry already free %x:%" PRIX64 "\n",
tbl_idx, res->resource_hndl);
- return -EINVAL;
+ return 0;
}
ULP_GEN_TBL_REF_CNT_DEC(&entry);
@@ -336,24 +340,27 @@ ulp_mapper_gen_tbl_res_free(struct bnxt_ulp_context *ulp_ctx,
if (ULP_GEN_TBL_REF_CNT(&entry))
return 0;
- /* Delete the generic table entry. First extract the fid */
+ /* Delete the generic table entry. First extract the rid */
if (ulp_mapper_gen_tbl_entry_data_get(&entry, ULP_GEN_TBL_FID_OFFSET,
ULP_GEN_TBL_FID_SIZE_BITS,
- (uint8_t *)&fid,
- sizeof(fid))) {
- BNXT_TF_DBG(ERR, "Unable to get fid %x:%" PRIX64 "\n",
+ (uint8_t *)&rid,
+ sizeof(rid))) {
+ BNXT_TF_DBG(ERR, "Unable to get rid %x:%" PRIX64 "\n",
tbl_idx, res->resource_hndl);
return -EINVAL;
}
- fid = tfp_be_to_cpu_32(fid);
- /* no need to del if fid is 0 since there is no associated resource */
- if (fid) {
+ rid = tfp_be_to_cpu_32(rid);
+ /* No need to delete if rid is 0 since there is no associated
+ * resource. If the rid from the entry equals the incoming fid,
+ * we have a recursive delete, so don't follow the rid.
+ */
+ if (rid && rid != fid) {
/* Destroy the flow associated with the shared flow id */
if (ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_RID,
- fid))
+ rid))
BNXT_TF_DBG(ERR,
- "Error in deleting shared flow id %x\n",
- fid);
+ "Error in deleting shared resource id %x\n",
+ rid);
}
/* Delete the entry from the hash table */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_gen_tbl.h b/drivers/net/bnxt/tf_ulp/ulp_gen_tbl.h
index 3060072967..4c5a6e176f 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_gen_tbl.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_gen_tbl.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -134,12 +134,14 @@ ulp_mapper_gen_tbl_entry_data_get(struct ulp_mapper_gen_tbl_entry *entry,
* Free the generic table list resource
*
* ulp_ctx [in] - Pointer to the ulp context
+ * fid [in] - The fid the generic table is associated with
* res [in] - Pointer to flow db resource entry
*
* returns 0 on success
*/
int32_t
ulp_mapper_gen_tbl_res_free(struct bnxt_ulp_context *ulp_ctx,
+ uint32_t fid,
struct ulp_flow_db_res_params *res);
/* Free the generic table list entry
diff --git a/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c
index 42482b596f..f3f5bda890 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c
@@ -23,8 +23,6 @@
#define ULP_HA_IF_TBL_DIR TF_DIR_RX
#define ULP_HA_IF_TBL_TYPE TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR
-#define ULP_HA_IF_TBL_IDX 10
-#define ULP_HA_CLIENT_CNT_IF_TBL_IDX 9
static void ulp_ha_mgr_timer_cancel(struct bnxt_ulp_context *ulp_ctx);
static int32_t ulp_ha_mgr_timer_start(void *arg);
@@ -42,8 +40,8 @@ static int32_t
ulp_ha_mgr_tf_client_num_get(struct bnxt_ulp_context *ulp_ctx, uint32_t *cnt);
static int32_t
-ulp_ha_mgr_state_set(struct bnxt_ulp_context *ulp_ctx,
- enum ulp_ha_mgr_state state)
+ulp_ha_mgr_state_set_v1(struct bnxt_ulp_context *ulp_ctx,
+ enum ulp_ha_mgr_state state)
{
struct tf_set_if_tbl_entry_parms set_parms = { 0 };
struct tf *tfp;
@@ -66,7 +64,7 @@ ulp_ha_mgr_state_set(struct bnxt_ulp_context *ulp_ctx,
set_parms.type = ULP_HA_IF_TBL_TYPE;
set_parms.data = (uint8_t *)&val;
set_parms.data_sz_in_bytes = sizeof(val);
- set_parms.idx = ULP_HA_IF_TBL_IDX;
+ set_parms.idx = bnxt_ulp_ha_reg_state_get(ulp_ctx);
rc = tf_set_if_tbl_entry(tfp, &set_parms);
if (rc)
@@ -76,8 +74,82 @@ ulp_ha_mgr_state_set(struct bnxt_ulp_context *ulp_ctx,
}
static int32_t
-ulp_ha_mgr_tf_client_num_get(struct bnxt_ulp_context *ulp_ctx,
- uint32_t *cnt)
+ulp_ha_mgr_state_set_v2(struct bnxt_ulp_context *ulp_ctx,
+ enum ulp_ha_mgr_state state)
+{
+ struct tf_set_session_hotup_state_parms parms = { 0 };
+ struct tf *tfp;
+ int32_t rc = 0;
+
+ if (ulp_ctx == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid parms in state get.\n");
+ return -EINVAL;
+ }
+
+ tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SESSION_TYPE_SHARED_WC);
+ if (tfp == NULL) {
+ BNXT_TF_DBG(ERR, "Unable to get the TFP.\n");
+ return -EINVAL;
+ }
+
+ parms.state = (uint16_t)state;
+ rc = tf_set_session_hotup_state(tfp, &parms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to write the HA state\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+static int32_t
+ulp_ha_mgr_state_set(struct bnxt_ulp_context *ulp_ctx,
+ enum ulp_ha_mgr_state state)
+{
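+ /* With multiple shared sessions enabled, the HA state lives in
+ * the session, so use the v2 hotup state API; otherwise fall back
+ * to the v1 interface-table based scheme.
+ */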
+ if (bnxt_ulp_cntxt_multi_shared_session_enabled(ulp_ctx))
+ return ulp_ha_mgr_state_set_v2(ulp_ctx, state);
+ else
+ return ulp_ha_mgr_state_set_v1(ulp_ctx, state);
+}
+
+static int32_t
+ulp_ha_mgr_tf_state_get(struct bnxt_ulp_context *ulp_ctx,
+ uint32_t *state,
+ uint32_t *cnt)
+{
+ struct tf_get_session_hotup_state_parms parms = { 0 };
+ struct tf *tfp;
+ int32_t rc = 0;
+
+ if (ulp_ctx == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid parms in client num get.\n");
+ return -EINVAL;
+ }
+
+ tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SESSION_TYPE_SHARED_WC);
+ if (tfp == NULL) {
+ BNXT_TF_DBG(ERR, "Unable to get the TFP.\n");
+ return -EINVAL;
+ }
+
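+ /* A single hotup query returns both the state and the ref count */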
+ rc = tf_get_session_hotup_state(tfp, &parms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to read the HA state\n");
+ return rc;
+ }
+
+ if (state)
+ *state = parms.state;
+
+ if (cnt)
+ *cnt = parms.ref_cnt;
+
+ return rc;
+}
+
+static int32_t
+ulp_ha_mgr_tf_client_num_get_v1(struct bnxt_ulp_context *ulp_ctx,
+ uint32_t *cnt)
{
struct tf_get_if_tbl_entry_parms get_parms = { 0 };
struct tf *tfp;
@@ -96,7 +168,7 @@ ulp_ha_mgr_tf_client_num_get(struct bnxt_ulp_context *ulp_ctx,
get_parms.dir = ULP_HA_IF_TBL_DIR;
get_parms.type = ULP_HA_IF_TBL_TYPE;
- get_parms.idx = ULP_HA_CLIENT_CNT_IF_TBL_IDX;
+ get_parms.idx = bnxt_ulp_ha_reg_cnt_get(ulp_ctx);
get_parms.data = (uint8_t *)&val;
get_parms.data_sz_in_bytes = sizeof(val);
@@ -108,6 +180,16 @@ ulp_ha_mgr_tf_client_num_get(struct bnxt_ulp_context *ulp_ctx,
return rc;
}
+static int32_t
+ulp_ha_mgr_tf_client_num_get(struct bnxt_ulp_context *ulp_ctx,
+ uint32_t *cnt)
+{
+ if (bnxt_ulp_cntxt_multi_shared_session_enabled(ulp_ctx))
+ return ulp_ha_mgr_tf_state_get(ulp_ctx, NULL, cnt);
+ else
+ return ulp_ha_mgr_tf_client_num_get_v1(ulp_ctx, cnt);
+}
+
static int32_t
ulp_ha_mgr_region_set(struct bnxt_ulp_context *ulp_ctx,
enum ulp_ha_mgr_region region)
@@ -386,9 +468,9 @@ ulp_ha_mgr_app_type_get(struct bnxt_ulp_context *ulp_ctx,
return 0;
}
-int32_t
-ulp_ha_mgr_state_get(struct bnxt_ulp_context *ulp_ctx,
- enum ulp_ha_mgr_state *state)
+static int32_t
+ulp_ha_mgr_state_get_v1(struct bnxt_ulp_context *ulp_ctx,
+ enum ulp_ha_mgr_state *state)
{
struct tf_get_if_tbl_entry_parms get_parms = { 0 };
struct tf *tfp;
@@ -407,7 +489,7 @@ ulp_ha_mgr_state_get(struct bnxt_ulp_context *ulp_ctx,
get_parms.dir = ULP_HA_IF_TBL_DIR;
get_parms.type = ULP_HA_IF_TBL_TYPE;
- get_parms.idx = ULP_HA_IF_TBL_IDX;
+ get_parms.idx = bnxt_ulp_ha_reg_state_get(ulp_ctx);
get_parms.data = (uint8_t *)&val;
get_parms.data_sz_in_bytes = sizeof(val);
@@ -419,6 +501,16 @@ ulp_ha_mgr_state_get(struct bnxt_ulp_context *ulp_ctx,
return rc;
}
+int32_t
+ulp_ha_mgr_state_get(struct bnxt_ulp_context *ulp_ctx,
+ enum ulp_ha_mgr_state *state)
+{
+ if (bnxt_ulp_cntxt_multi_shared_session_enabled(ulp_ctx))
+ return ulp_ha_mgr_tf_state_get(ulp_ctx, state, NULL);
+ else
+ return ulp_ha_mgr_state_get_v1(ulp_ctx, state);
+}
+
int32_t
ulp_ha_mgr_open(struct bnxt_ulp_context *ulp_ctx)
{
@@ -607,10 +699,9 @@ ulp_ha_mgr_close(struct bnxt_ulp_context *ulp_ctx)
BNXT_TF_DBG(INFO,
"On Close: SEC[COPY] => [INIT] after %d ms\n",
ULP_HA_WAIT_TIMEOUT - timeout);
- } else {
- BNXT_TF_DBG(ERR, "On Close: Invalid type/state %d/%d\n",
- curr_state, app_type);
}
+ /* else do nothing, just return */
+
cleanup:
return rc;
}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.h b/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.h
index ded967a0af..c39a1371d9 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -8,6 +8,9 @@
#include "bnxt_ulp.h"
+#define ULP_HA_IF_TBL_IDX 10
+#define ULP_HA_CLIENT_CNT_IF_TBL_IDX 9
+
enum ulp_ha_mgr_state {
ULP_HA_STATE_INIT,
ULP_HA_STATE_PRIM_RUN,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 1f459c52a4..e5f1d266d7 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -149,7 +149,7 @@ ulp_mapper_resource_ident_allocate(struct bnxt_ulp_context *ulp_ctx,
- * Shared resources are never allocated through this method, so the
- * shared flag is always false.
+ * Pass the caller's shared flag through so app-level global
+ * resources can be stored with the shared attribute.
*/
- rc = ulp_mapper_glb_resource_write(mapper_data, glb_res, regval, false);
+ rc = ulp_mapper_glb_resource_write(mapper_data, glb_res, regval, shared);
if (rc) {
BNXT_TF_DBG(ERR, "Failed to write to global resource id\n");
/* Free the identifier when update failed */
@@ -212,7 +212,7 @@ ulp_mapper_resource_index_tbl_alloc(struct bnxt_ulp_context *ulp_ctx,
- * Shared resources are never allocated through this method, so the
- * shared flag is always false.
+ * Pass the caller's shared flag through so app-level global
+ * resources can be stored with the shared attribute.
*/
- rc = ulp_mapper_glb_resource_write(mapper_data, glb_res, regval, false);
+ rc = ulp_mapper_glb_resource_write(mapper_data, glb_res, regval, shared);
if (rc) {
BNXT_TF_DBG(ERR, "Failed to write to global resource id\n");
/* Free the identifier when update failed */
@@ -442,6 +442,7 @@ ulp_mapper_dyn_tbl_type_get(struct bnxt_ulp_mapper_parms *mparms,
case TF_TBL_TYPE_ACT_ENCAP_16B:
case TF_TBL_TYPE_ACT_ENCAP_32B:
case TF_TBL_TYPE_ACT_ENCAP_64B:
+ case TF_TBL_TYPE_ACT_ENCAP_128B:
size_map = d_params->dyn_encap_sizes;
for (i = 0; i < d_params->dyn_encap_list_size; i++) {
if (blob_len <= size_map[i].slab_size) {
@@ -534,6 +535,41 @@ ulp_mapper_tcam_entry_free(struct bnxt_ulp_context *ulp,
return tf_free_tcam_entry(tfp, &fparms);
}
+static int32_t
+ulp_mapper_clear_full_action_record(struct tf *tfp,
+ struct bnxt_ulp_context *ulp_ctx,
+ struct tf_free_tbl_entry_parms *fparms)
+{
+ struct tf_set_tbl_entry_parms sparms = { 0 };
+ uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST;
+ int32_t rc = 0;
+
+ rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
+ return rc;
+ }
+
+ if (dev_id == BNXT_ULP_DEVICE_ID_THOR) {
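+ /* Overwrite the 16B full action record with zeros before the
+ * index is returned to the pool so no stale record is left behind.
+ */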
+ sparms.dir = fparms->dir;
+ sparms.data = mapper_fld_zeros;
+ sparms.type = fparms->type;
+ sparms.data_sz_in_bytes = 16; /* FULL ACT REC SIZE - THOR */
+ sparms.idx = fparms->idx;
+ sparms.tbl_scope_id = fparms->tbl_scope_id;
+ rc = tf_set_tbl_entry(tfp, &sparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR,
+ "Index table[%s][%s][%x] write fail rc=%d\n",
+ tf_tbl_type_2_str(sparms.type),
+ tf_dir_2_str(sparms.dir),
+ sparms.idx, rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
static inline int32_t
ulp_mapper_index_entry_free(struct bnxt_ulp_context *ulp,
struct tf *tfp,
@@ -551,6 +587,9 @@ ulp_mapper_index_entry_free(struct bnxt_ulp_context *ulp,
*/
(void)bnxt_ulp_cntxt_tbl_scope_id_get(ulp, &fparms.tbl_scope_id);
+ if (fparms.type == TF_TBL_TYPE_FULL_ACT_RECORD)
+ (void)ulp_mapper_clear_full_action_record(tfp, ulp, &fparms);
+
return tf_free_tbl_entry(tfp, &fparms);
}
@@ -665,6 +704,10 @@ ulp_mapper_fdb_opc_alloc_rid(struct bnxt_ulp_mapper_parms *parms,
BNXT_ULP_FDB_TYPE_RID, rid);
return -EINVAL;
}
+ /* save the rid into the parms in case a flow fails before pushing the
+ * rid into the fid
+ */
+ parms->rid = rid;
return 0;
}
@@ -845,7 +888,7 @@ ulp_mapper_ident_process(struct bnxt_ulp_mapper_parms *parms,
tf_ident_2_str(iparms.ident_type));
return rc;
}
- BNXT_TF_INF("Alloc ident %s:%s.success.\n",
+ BNXT_TF_DBG(DEBUG, "Alloc ident %s:%s.success.\n",
tf_dir_2_str(iparms.dir),
tf_ident_2_str(iparms.ident_type));
@@ -941,9 +984,9 @@ ulp_mapper_ident_extract(struct bnxt_ulp_mapper_parms *parms,
sparms.search_id);
return rc;
}
- BNXT_TF_INF("Search ident %s:%s:%x.success.\n",
+ BNXT_TF_DBG(DEBUG, "Search ident %s:%s:%x.success.\n",
tf_dir_2_str(sparms.dir),
- tf_tbl_type_2_str(sparms.ident_type),
+ tf_ident_2_str(sparms.ident_type),
sparms.search_id);
/* Write it to the regfile */
@@ -1016,6 +1059,20 @@ ulp_mapper_field_port_db_process(struct bnxt_ulp_mapper_parms *parms,
return -EINVAL;
}
break;
+ case BNXT_ULP_PORT_TABLE_PORT_IS_PF:
+ if (ulp_port_db_port_is_pf_get(parms->ulp_ctx, port_id,
+ val)) {
+ BNXT_TF_DBG(ERR, "Invalid port id %u\n", port_id);
+ return -EINVAL;
+ }
+ break;
+ case BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA:
+ if (ulp_port_db_port_meta_data_get(parms->ulp_ctx, port_id,
+ val)) {
+ BNXT_TF_DBG(ERR, "Invalid port id %u\n", port_id);
+ return -EINVAL;
+ }
+ break;
default:
BNXT_TF_DBG(ERR, "Invalid port_data %d\n", port_data);
return -EINVAL;
@@ -1042,6 +1099,7 @@ ulp_mapper_field_src_process(struct bnxt_ulp_mapper_parms *parms,
uint8_t *buffer;
uint64_t lregval;
bool shared;
+ uint8_t i = 0;
*val_len = bitlen;
*value = 0;
@@ -1111,6 +1169,11 @@ ulp_mapper_field_src_process(struct bnxt_ulp_mapper_parms *parms,
return -EINVAL;
}
*val = &buffer[field_size - bytelen];
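+ /* Also fold the field bytes, MSB first, into the numeric value
+ * when they fit.
+ */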
+ if (sizeof(*value) >= field_size) {
+ *value = buffer[0];
+ for (i = 1; i < field_size; i++)
+ *value = (*value << 8) | buffer[i];
+ }
break;
case BNXT_ULP_FIELD_SRC_ACT_PROP_SZ:
if (!ulp_operand_read(field_opr,
@@ -1254,11 +1317,22 @@ ulp_mapper_field_src_process(struct bnxt_ulp_mapper_parms *parms,
}
break;
case BNXT_ULP_FIELD_SRC_PORT_TABLE:
+ if (!ulp_operand_read(field_opr,
+ (uint8_t *)&idx, sizeof(uint16_t))) {
+ BNXT_TF_DBG(ERR, "CF operand read failed\n");
+ return -EINVAL;
+ }
+ idx = tfp_be_to_cpu_16(idx);
+ if (idx >= BNXT_ULP_CF_IDX_LAST || bytelen > sizeof(uint64_t)) {
+ BNXT_TF_DBG(ERR, "comp field [%d] read oob %d\n", idx,
+ bytelen);
+ return -EINVAL;
+ }
+
/* The port id is present in the comp field list */
- port_id = ULP_COMP_FLD_IDX_RD(parms,
- BNXT_ULP_CF_IDX_DEV_PORT_ID);
+ port_id = ULP_COMP_FLD_IDX_RD(parms, idx);
/* get the port table enum */
- if (!ulp_operand_read(field_opr,
+ if (!ulp_operand_read(field_opr + sizeof(uint16_t),
(uint8_t *)&idx, sizeof(uint16_t))) {
BNXT_TF_DBG(ERR, "Port table enum read failed\n");
return -EINVAL;
@@ -1557,9 +1631,8 @@ ulp_mapper_field_opc_process(struct bnxt_ulp_mapper_parms *parms,
break;
}
- if (!rc) {
+ if (!rc)
return rc;
- }
error:
BNXT_TF_DBG(ERR, "Error in %s:%s process %u:%u\n", name,
fld->description, (val) ? write_idx : 0, val_len);
@@ -1878,7 +1951,7 @@ ulp_mapper_tcam_tbl_entry_write(struct bnxt_ulp_mapper_parms *parms,
tf_dir_2_str(sparms.dir), sparms.idx);
return -EIO;
}
- BNXT_TF_INF("tcam[%s][%s][%x] write success.\n",
+ BNXT_TF_DBG(DEBUG, "tcam[%s][%s][%x] write success.\n",
tf_tcam_tbl_2_str(sparms.tcam_tbl_type),
tf_dir_2_str(sparms.dir), sparms.idx);
@@ -2168,7 +2241,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
rc = tf_search_tcam_entry(tfp, &searchparms);
if (rc) {
- BNXT_TF_DBG(ERR, "tcam search failed rc=%d\n", rc);
+ BNXT_TF_DBG(ERR, "entry priority process failed\n");
return rc;
}
@@ -2546,7 +2619,7 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
gparms.dir = tbl->direction;
gparms.type = tbl->resource_type;
gparms.data = ulp_blob_data_get(&data, &tmplen);
- gparms.data_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
+ gparms.data_sz_in_bytes = ULP_BITS_2_BYTE(tbl->result_bit_size);
gparms.idx = index;
rc = tf_get_tbl_entry(tfp, &gparms);
if (rc) {
@@ -2651,7 +2724,6 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
if (shared)
tfp = bnxt_ulp_cntxt_tfp_get(parms->ulp_ctx,
tbl->session_type);
-
rc = tf_set_tbl_entry(tfp, &sparms);
if (rc) {
BNXT_TF_DBG(ERR,
@@ -2661,7 +2733,7 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
sparms.idx, rc);
goto error;
}
- BNXT_TF_INF("Index table[%s][%s][%x] write successful.\n",
+ BNXT_TF_DBG(DEBUG, "Index table[%s][%s][%x] write successful\n",
tf_tbl_type_2_str(sparms.type),
tf_dir_2_str(sparms.dir), sparms.idx);
@@ -2832,6 +2904,61 @@ ulp_mapper_if_tbl_process(struct bnxt_ulp_mapper_parms *parms,
return rc;
}
+static int32_t
+ulp_mapper_gen_tbl_ref_cnt_process(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl,
+ struct ulp_mapper_gen_tbl_entry *entry)
+{
+ int32_t rc = 0;
+ uint64_t val64;
+
+ /* Allow the template to manage the reference count */
+ switch (tbl->ref_cnt_opcode) {
+ case BNXT_ULP_REF_CNT_OPC_INC:
+ ULP_GEN_TBL_REF_CNT_INC(entry);
+ break;
+ case BNXT_ULP_REF_CNT_OPC_DEC:
+ /* writes never decrement the ref count */
+ if (tbl->tbl_opcode == BNXT_ULP_GENERIC_TBL_OPC_WRITE)
+ return -EINVAL;
+
+ ULP_GEN_TBL_REF_CNT_DEC(entry);
+ break;
+ case BNXT_ULP_REF_CNT_OPC_NOP:
+ /* Nothing to be done; generally used when the
+ * template reads the ref_cnt to make a decision
+ */
+ break;
+ case BNXT_ULP_REF_CNT_OPC_DEFAULT:
+ /* This is the default case and is backward
+ * compatible with older templates
+ */
+ if (tbl->fdb_opcode != BNXT_ULP_FDB_OPC_NOP)
+ ULP_GEN_TBL_REF_CNT_INC(entry);
+ break;
+ default:
+ BNXT_TF_DBG(ERR, "Invalid REF_CNT_OPC %d\n",
+ tbl->ref_cnt_opcode);
+ return -EINVAL;
+ }
+
+ if (tbl->tbl_opcode == BNXT_ULP_GENERIC_TBL_OPC_READ) {
+ /* Add ref_cnt to the regfile for template to use. */
+ val64 = (uint32_t)ULP_GEN_TBL_REF_CNT(entry);
+ val64 = tfp_cpu_to_be_64(val64);
+ rc = ulp_regfile_write(parms->regfile,
+ BNXT_ULP_RF_IDX_REF_CNT,
+ val64);
+ if (rc) {
+ BNXT_TF_DBG(ERR,
+ "Failed to write regfile[ref_cnt]\n");
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
static int32_t
ulp_mapper_gen_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct bnxt_ulp_mapper_tbl_info *tbl)
@@ -2886,6 +3013,7 @@ ulp_mapper_gen_tbl_process(struct bnxt_ulp_mapper_parms *parms,
/* The_key is a byte array convert it to a search index */
cache_key = ulp_blob_data_get(&key, &tmplen);
+
/* get the generic table */
gen_tbl_list = &parms->mapper_data->gen_tbl_list[tbl_idx];
@@ -2949,10 +3077,6 @@ ulp_mapper_gen_tbl_process(struct bnxt_ulp_mapper_parms *parms,
"Failed to scan ident list\n");
return -EINVAL;
}
- if (tbl->fdb_opcode != BNXT_ULP_FDB_OPC_NOP) {
- /* increment the reference count */
- ULP_GEN_TBL_REF_CNT_INC(&gen_tbl_ent);
- }
/* it is a hit */
gen_tbl_miss = 0;
@@ -2969,8 +3093,13 @@ ulp_mapper_gen_tbl_process(struct bnxt_ulp_mapper_parms *parms,
/* store the hash index in the fdb */
key_index = hash_entry.hash_index;
}
- /* check the reference count */
- if (ULP_GEN_TBL_REF_CNT(&gen_tbl_ent)) {
+
+ /* check the reference count and ignore ref_cnt if NOP.
+ * NOP allows a write as an update.
+ */
+
+ if (tbl->ref_cnt_opcode != BNXT_ULP_REF_CNT_OPC_NOP &&
+ ULP_GEN_TBL_REF_CNT(&gen_tbl_ent)) {
/* a hit then error */
BNXT_TF_DBG(ERR, "generic entry already present\n");
return -EINVAL;
@@ -2999,8 +3128,6 @@ ulp_mapper_gen_tbl_process(struct bnxt_ulp_mapper_parms *parms,
return -EINVAL;
}
- /* increment the reference count */
- ULP_GEN_TBL_REF_CNT_INC(&gen_tbl_ent);
fdb_write = 1;
parms->shared_hndl = (uint64_t)tbl_idx << 32 | key_index;
break;
@@ -3030,9 +3157,24 @@ ulp_mapper_gen_tbl_process(struct bnxt_ulp_mapper_parms *parms,
ulp_flow_db_shared_session_set(&fid_parms, tbl->session_type);
rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms);
- if (rc)
+ if (rc) {
BNXT_TF_DBG(ERR, "Fail to add gen ent flowdb %d\n", rc);
+ return rc;
+ }
+
+ /* Reset the in-flight RID when generic table is written and the
+ * rid has been pushed into a handle (rid or fid). Once it has
+ * been written, we have persistent accounting of the resources.
+ */
+ if (tbl->tbl_opcode == BNXT_ULP_GENERIC_TBL_OPC_WRITE &&
+ (tbl->fdb_opcode == BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE ||
+ tbl->fdb_opcode == BNXT_ULP_FDB_OPC_PUSH_FID))
+ parms->rid = 0;
+
+ rc = ulp_mapper_gen_tbl_ref_cnt_process(parms, tbl,
+ &gen_tbl_ent);
}
+
return rc;
}
@@ -3041,6 +3183,8 @@ ulp_mapper_ctrl_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct bnxt_ulp_mapper_tbl_info *tbl)
{
int32_t rc = 0;
+ uint64_t val64 = 0;
+ uint32_t rid;
/* process the fdb opcode for alloc push */
if (tbl->fdb_opcode == BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE) {
@@ -3049,7 +3193,204 @@ ulp_mapper_ctrl_tbl_process(struct bnxt_ulp_mapper_parms *parms,
BNXT_TF_DBG(ERR, "Failed to do fdb alloc\n");
return rc;
}
+ } else if (tbl->fdb_opcode == BNXT_ULP_FDB_OPC_DELETE_RID_REGFILE) {
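+ /* Fetch the RID from the regfile and free all resources that
+ * were recorded under it.
+ */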
+ rc = ulp_regfile_read(parms->regfile, tbl->fdb_operand, &val64);
+ if (!rc) {
+ BNXT_TF_DBG(ERR, "Failed to get RID from regfile\n");
+ return rc;
+ }
+ rid = (uint32_t)tfp_be_to_cpu_64(val64);
+ rc = ulp_mapper_resources_free(parms->ulp_ctx,
+ BNXT_ULP_FDB_TYPE_RID,
+ rid);
+ }
+
+ return rc;
+}
+
+static int32_t
+ulp_mapper_vnic_tbl_process(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl)
+{
+ struct ulp_flow_db_res_params fid_parms;
+ uint16_t vnic_idx = 0, vnic_id = 0;
+ int32_t rc = 0;
+
+ switch (tbl->resource_sub_type) {
+ case BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_RSS:
+ if (tbl->tbl_opcode != BNXT_ULP_VNIC_TBL_OPC_ALLOC_WR_REGFILE) {
+ BNXT_TF_DBG(ERR, "Invalid vnic table opcode\n");
+ return -EINVAL;
+ }
+ rc = bnxt_pmd_rss_action_create(parms, &vnic_idx, &vnic_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed create rss action\n");
+ return rc;
+ }
+ break;
+ case BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_QUEUE:
+ if (tbl->tbl_opcode != BNXT_ULP_VNIC_TBL_OPC_ALLOC_WR_REGFILE) {
+ BNXT_TF_DBG(ERR, "Invalid vnic table opcode\n");
+ return -EINVAL;
+ }
+ rc = bnxt_pmd_queue_action_create(parms, &vnic_idx, &vnic_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed create queue action\n");
+ return rc;
+ }
+ break;
+ default:
+ BNXT_TF_DBG(ERR, "Invalid vnic table sub type\n");
+ return -EINVAL;
+ }
+
+ /* Link the created vnic to the flow in the flow db */
+ memset(&fid_parms, 0, sizeof(fid_parms));
+ fid_parms.direction = tbl->direction;
+ fid_parms.resource_func = tbl->resource_func;
+ fid_parms.resource_type = tbl->resource_type;
+ fid_parms.resource_sub_type = tbl->resource_sub_type;
+ fid_parms.resource_hndl = vnic_idx;
+ fid_parms.critical_resource = tbl->critical_resource;
+ rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to link resource to flow rc = %d\n",
+ rc);
+ return rc;
+ }
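+ /* Publish the new vnic id through the regfile for later tables */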
+ rc = ulp_regfile_write(parms->regfile, tbl->tbl_operand,
+ (uint64_t)tfp_cpu_to_be_64(vnic_id));
+ if (rc)
+ BNXT_TF_DBG(ERR, "Failed to write regfile[%d] rc=%d\n",
+ tbl->tbl_operand, rc);
+
+ return rc;
+}
+
+/* Free the vnic resource */
+static int32_t
+ulp_mapper_vnic_tbl_res_free(struct bnxt_ulp_context *ulp __rte_unused,
+ struct tf *tfp,
+ struct ulp_flow_db_res_params *res)
+{
+ uint16_t vnic_idx = res->resource_hndl;
+
+ if (res->resource_sub_type ==
+ BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_QUEUE)
+ return bnxt_pmd_queue_action_delete(tfp, vnic_idx);
+ else
+ return bnxt_pmd_rss_action_delete(tfp, vnic_idx);
+}
+
+static int32_t
+ulp_mapper_global_res_free(struct bnxt_ulp_context *ulp __rte_unused,
+ struct tf *tfp __rte_unused,
+ struct ulp_flow_db_res_params *res)
+{
+ uint16_t port_id = 0, dport = 0; /* Not needed for free */
+ int32_t rc = 0;
+ uint8_t ttype;
+ uint32_t handle = res->resource_hndl;
+
+ switch (res->resource_sub_type) {
+ case BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_VXLAN:
+ ttype = BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN;
+ rc = bnxt_pmd_global_tunnel_set(port_id, ttype, dport,
+ &handle);
+ break;
+ default:
+ rc = -EINVAL;
+ BNXT_TF_DBG(ERR, "Invalid ulp global resource type %d\n",
+ res->resource_sub_type);
+ break;
+ }
+
+ return rc;
+}
+
+static int32_t
+ulp_mapper_global_register_tbl_process(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl)
+{
+ struct ulp_flow_db_res_params fid_parms = { 0 };
+ struct ulp_blob data;
+ uint16_t data_len = 0;
+ uint8_t *tmp_data;
+ uint16_t udp_port;
+ uint32_t handle;
+ int32_t rc = 0, write_reg = 0;
+ uint8_t ttype;
+
+ /* Initialize the blob data */
+ if (!ulp_blob_init(&data, tbl->result_bit_size,
+ BNXT_ULP_BYTE_ORDER_BE)) {
+ BNXT_TF_DBG(ERR, "Failed initial ulp_global table blob\n");
+ return -EINVAL;
+ }
+
+ /* read the arguments from the result table */
+ rc = ulp_mapper_tbl_result_build(parms, tbl, &data,
+ "ULP Global Result");
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to build the result blob\n");
+ return rc;
+ }
+
+ switch (tbl->tbl_opcode) {
+ case BNXT_ULP_GLOBAL_REGISTER_TBL_OPC_WR_REGFILE:
+ write_reg = 1;
+ break;
+ case BNXT_ULP_GLOBAL_REGISTER_TBL_OPC_NOT_USED:
+ break;
+ default:
+ BNXT_TF_DBG(ERR, "Invalid global table opcode %d\n",
+ tbl->tbl_opcode);
+ return -EINVAL;
+ }
+
+ switch (tbl->resource_sub_type) {
+ case BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_VXLAN:
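+ /* The first 16 bits of the result blob carry the VXLAN UDP port */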
+ tmp_data = ulp_blob_data_get(&data, &data_len);
+ udp_port = *((uint16_t *)tmp_data);
+ udp_port = tfp_be_to_cpu_16(udp_port);
+ ttype = BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN;
+
+ rc = bnxt_pmd_global_tunnel_set(parms->port_id, ttype,
+ udp_port, &handle);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to set VXLAN UDP port\n");
+ return rc;
+ }
+ break;
+ default:
+ rc = -EINVAL;
+ BNXT_TF_DBG(ERR, "Invalid ulp global resource type %d\n",
+ tbl->resource_sub_type);
+ return rc;
}
+
+ /* Set the common pieces of fid parms */
+ fid_parms.direction = tbl->direction;
+ fid_parms.resource_func = tbl->resource_func;
+ fid_parms.resource_sub_type = tbl->resource_sub_type;
+ fid_parms.critical_resource = tbl->critical_resource;
+ fid_parms.resource_hndl = handle;
+
+ rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms);
+
+ if (rc)
+ return rc;
+
+ /* write to the regfile if opcode is set */
+ if (write_reg) {
+ rc = ulp_regfile_write(parms->regfile,
+ tbl->tbl_operand,
+ (uint64_t)tfp_cpu_to_be_64(handle));
+ if (rc)
+ BNXT_TF_DBG(ERR, "Regfile[%d] write failed.\n",
+ tbl->tbl_operand);
+ }
+
return rc;
}
@@ -3112,36 +3453,33 @@ ulp_mapper_glb_resource_info_init(struct bnxt_ulp_context *ulp_ctx,
return rc;
}
-/*
- * Iterate over the shared resources assigned during tf_open_session and store
- * them in the global regfile with the shared flag.
- */
static int32_t
ulp_mapper_app_glb_resource_info_init(struct bnxt_ulp_context *ulp_ctx,
- struct bnxt_ulp_mapper_data *mapper_data)
+ struct bnxt_ulp_mapper_data *mapper_data)
{
struct bnxt_ulp_glb_resource_info *glb_res;
uint32_t num_glb_res_ids, idx, dev_id;
uint8_t app_id;
- uint32_t rc = 0;
+ int32_t rc = 0;
glb_res = bnxt_ulp_app_glb_resource_info_list_get(&num_glb_res_ids);
if (!glb_res || !num_glb_res_ids) {
BNXT_TF_DBG(ERR, "Invalid Arguments\n");
return -EINVAL;
}
+
rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
if (rc) {
- BNXT_TF_DBG(ERR, "Failed to get device_id for glb init (%d)\n",
+ BNXT_TF_DBG(ERR, "Failed to get device id for glb init (%d)\n",
rc);
- return -EINVAL;
+ return rc;
}
rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
if (rc) {
- BNXT_TF_DBG(ERR, "Failed to get app_id for glb init (%d)\n",
+ BNXT_TF_DBG(ERR, "Failed to get app id for glb init (%d)\n",
rc);
- return -EINVAL;
+ return rc;
}
/* Iterate the global resources and process each one */
@@ -3154,13 +3492,13 @@ ulp_mapper_app_glb_resource_info_init(struct bnxt_ulp_context *ulp_ctx,
rc = ulp_mapper_resource_ident_allocate(ulp_ctx,
mapper_data,
&glb_res[idx],
- false);
+ true);
break;
case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
rc = ulp_mapper_resource_index_tbl_alloc(ulp_ctx,
mapper_data,
&glb_res[idx],
- false);
+ true);
break;
default:
BNXT_TF_DBG(ERR, "Global resource %x not supported\n",
@@ -3726,6 +4064,12 @@ ulp_mapper_tbls_process(struct bnxt_ulp_mapper_parms *parms, uint32_t tid)
case BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE:
rc = ulp_mapper_ctrl_tbl_process(parms, tbl);
break;
+ case BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE:
+ rc = ulp_mapper_vnic_tbl_process(parms, tbl);
+ break;
+ case BNXT_ULP_RESOURCE_FUNC_GLOBAL_REGISTER_TABLE:
+ rc = ulp_mapper_global_register_tbl_process(parms, tbl);
+ break;
case BNXT_ULP_RESOURCE_FUNC_INVALID:
rc = 0;
break;
@@ -3781,7 +4125,7 @@ ulp_mapper_tbls_process(struct bnxt_ulp_mapper_parms *parms, uint32_t tid)
return rc;
error:
- BNXT_TF_DBG(ERR, "%s tables failed creation for %d:%d\n",
+ BNXT_TF_DBG(ERR, "%s tables failed operation for %d:%d\n",
ulp_mapper_tmpl_name_str(parms->tmpl_type),
parms->dev_id, tid);
return rc;
@@ -3828,7 +4172,13 @@ ulp_mapper_resource_free(struct bnxt_ulp_context *ulp,
rc = ulp_mapper_child_flow_free(ulp, fid, res);
break;
case BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE:
- rc = ulp_mapper_gen_tbl_res_free(ulp, res);
+ rc = ulp_mapper_gen_tbl_res_free(ulp, fid, res);
+ break;
+ case BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE:
+ rc = ulp_mapper_vnic_tbl_res_free(ulp, tfp, res);
+ break;
+ case BNXT_ULP_RESOURCE_FUNC_GLOBAL_REGISTER_TABLE:
+ rc = ulp_mapper_global_res_free(ulp, tfp, res);
break;
default:
break;
@@ -4045,11 +4395,26 @@ ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx,
return rc;
flow_error:
+ if (parms.rid) {
+ /* An RID was in-flight but not pushed, free the resources */
+ trc = ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_RID,
+ parms.rid);
+ if (trc)
+ BNXT_TF_DBG(ERR,
+ "Failed to free resources rid=0x%08x rc=%d\n",
+ parms.rid, trc);
+ parms.rid = 0;
+ }
+
/* Free all resources that were allocated during flow creation */
- trc = ulp_mapper_flow_destroy(ulp_ctx, parms.flow_type,
- parms.fid);
- if (trc)
- BNXT_TF_DBG(ERR, "Failed to free all resources rc=%d\n", trc);
+ if (parms.fid) {
+ trc = ulp_mapper_flow_destroy(ulp_ctx, parms.flow_type,
+ parms.fid);
+ if (trc)
+ BNXT_TF_DBG(ERR,
+ "Failed to free resources fid=0x%08x rc=%d\n",
+ parms.fid, trc);
+ }
return rc;
}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.h b/drivers/net/bnxt/tf_ulp/ulp_mapper.h
index b7e6f3ada2..225a14ccfa 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -52,6 +52,7 @@ struct bnxt_ulp_mapper_parms {
struct ulp_regfile *regfile;
struct bnxt_ulp_context *ulp_ctx;
uint32_t fid;
+ uint32_t rid;
enum bnxt_ulp_fdb_type flow_type;
struct bnxt_ulp_mapper_data *mapper_data;
struct bnxt_ulp_device_params *device_params;
diff --git a/drivers/net/bnxt/tf_ulp/ulp_matcher.c b/drivers/net/bnxt/tf_ulp/ulp_matcher.c
index 67fa61fc7c..8c90998a7d 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_matcher.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_matcher.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -29,8 +29,8 @@ ulp_matcher_action_hash_calculate(uint64_t hi_sig, uint64_t app_id)
hi_sig |= ((hi_sig % BNXT_ULP_ACT_HID_HIGH_PRIME) <<
BNXT_ULP_ACT_HID_SHFTL);
- app_id |= ((app_id % BNXT_ULP_CLASS_HID_LOW_PRIME) <<
- (BNXT_ULP_CLASS_HID_SHFTL + 2));
+ app_id |= ((app_id % BNXT_ULP_ACT_HID_LOW_PRIME) <<
+ (BNXT_ULP_ACT_HID_SHFTL + 2));
hash = hi_sig ^ app_id;
hash = (hash >> BNXT_ULP_ACT_HID_SHFTR) & BNXT_ULP_ACT_HID_MASK;
return (uint32_t)hash;
@@ -46,12 +46,8 @@ ulp_matcher_pattern_match(struct ulp_rte_parser_params *params,
{
struct bnxt_ulp_class_match_info *class_match;
uint32_t class_hid;
- uint8_t vf_to_vf;
uint16_t tmpl_id;
- /* Get vf to vf flow */
- vf_to_vf = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_VF_TO_VF);
-
/* calculate the hash of the given flow */
class_hid = ulp_matcher_class_hash_calculate((params->hdr_bitmap.bits ^
params->app_id),
@@ -81,10 +77,6 @@ ulp_matcher_pattern_match(struct ulp_rte_parser_params *params,
goto error;
}
- if (vf_to_vf != class_match->act_vnic) {
- BNXT_TF_DBG(DEBUG, "Vnic Match failed\n");
- goto error;
- }
BNXT_TF_DBG(DEBUG, "Found matching pattern template %d\n",
class_match->class_tid);
*class_id = class_match->class_tid;
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.c b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
index 57c9e7d175..ba1f966ec3 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
@@ -150,6 +150,11 @@ int32_t ulp_port_db_port_update(struct bnxt_ulp_context *ulp_ctxt,
intf = &port_db->ulp_intf_list[ifindex];
intf->type = bnxt_pmd_get_interface_type(port_id);
+ if (intf->type == BNXT_ULP_INTF_TYPE_PF)
+ intf->type_is_pf = 1;
+ else
+ intf->type_is_pf = 0;
+
intf->drv_func_id = bnxt_pmd_get_fw_func_id(port_id,
BNXT_ULP_INTF_TYPE_INVALID);
@@ -182,6 +187,9 @@ int32_t ulp_port_db_port_update(struct bnxt_ulp_context *ulp_ctxt,
bnxt_pmd_get_vnic_id(port_id, BNXT_ULP_INTF_TYPE_VF_REP);
func->phy_port_id = bnxt_pmd_get_phy_port_id(port_id);
func->ifindex = ifindex;
+ func->func_valid = true;
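+ /* Precompute the big-endian VF metadata consumed by the mapper */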
+ func->vf_meta_data = tfp_cpu_to_be_16(BNXT_ULP_META_VF_FLAG |
+ intf->vf_func_id);
}
/* When there is no match, the default action is to send the packet to
@@ -702,3 +710,53 @@ ulp_port_db_phy_port_get(struct bnxt_ulp_context *ulp_ctxt,
}
return -EINVAL;
}
+
+/*
+ * API to get the port type for a given port id.
+ *
+ * ulp_ctxt [in] Ptr to ulp context
+ * port_id [in] device port id
+ * type [out] pointer to the flag indicating whether the port is a PF
+ *
+ * Returns 0 on success or negative number on failure.
+ */
+int32_t
+ulp_port_db_port_is_pf_get(struct bnxt_ulp_context *ulp_ctxt,
+ uint32_t port_id, uint8_t **type)
+{
+ struct ulp_func_if_info *info;
+ struct bnxt_ulp_port_db *port_db;
+ uint16_t pid;
+
+ port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
+ info = ulp_port_db_func_if_info_get(ulp_ctxt, port_id);
+ if (info) {
+ pid = info->ifindex;
+ *type = &port_db->ulp_intf_list[pid].type_is_pf;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * API to get the meta data for a given port id.
+ *
+ * ulp_ctxt [in] Ptr to ulp context
+ * port_id [in] dpdk port id
+ * meta data [out] the meta data of the given port
+ *
+ * Returns 0 on success or negative number on failure.
+ */
+int32_t
+ulp_port_db_port_meta_data_get(struct bnxt_ulp_context *ulp_ctxt,
+ uint16_t port_id, uint8_t **meta_data)
+{
+ struct ulp_func_if_info *info;
+
+ info = ulp_port_db_func_if_info_get(ulp_ctxt, port_id);
+ if (info) {
+ *meta_data = (uint8_t *)&info->vf_meta_data;
+ return 0;
+ }
+ return -EINVAL;
+}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.h b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
index 784b93f8b3..d4efe0a3d5 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
@@ -328,4 +328,30 @@ ulp_port_db_parent_vnic_get(struct bnxt_ulp_context *ulp_ctxt,
int32_t
ulp_port_db_phy_port_get(struct bnxt_ulp_context *ulp_ctxt,
uint32_t port_id, uint16_t *phy_port);
+
+/*
+ * API to get the port type for a given port id.
+ *
+ * ulp_ctxt [in] Ptr to ulp context
+ * port_id [in] device port id
+ * type [out] pointer to the flag indicating whether the port is a PF
+ *
+ * Returns 0 on success or negative number on failure.
+ */
+int32_t
+ulp_port_db_port_is_pf_get(struct bnxt_ulp_context *ulp_ctxt,
+ uint32_t port_id, uint8_t **type);
+
+/*
+ * API to get the meta data for a given port id.
+ *
+ * ulp_ctxt [in] Ptr to ulp context
+ * port_id [in] dpdk port id
+ * meta data [out] the meta data of the given port
+ *
+ * Returns 0 on success or negative number on failure.
+ */
+int32_t
+ulp_port_db_port_meta_data_get(struct bnxt_ulp_context *ulp_ctxt,
+ uint16_t port_id, uint8_t **meta_data);
#endif /* _ULP_PORT_DB_H_ */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_handler_tbl.c b/drivers/net/bnxt/tf_ulp/ulp_rte_handler_tbl.c
index 9cf1ebfe1d..1fbfe18db3 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_handler_tbl.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_handler_tbl.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -38,8 +38,8 @@ struct bnxt_ulp_rte_act_info ulp_act_info[] = {
.proto_act_func = NULL
},
[RTE_FLOW_ACTION_TYPE_QUEUE] = {
- .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
- .proto_act_func = NULL
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_queue_act_handler
},
[RTE_FLOW_ACTION_TYPE_DROP] = {
.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
@@ -162,12 +162,12 @@ struct bnxt_ulp_rte_act_info ulp_act_info[] = {
.proto_act_func = NULL
},
[RTE_FLOW_ACTION_TYPE_SET_MAC_SRC] = {
- .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
- .proto_act_func = NULL
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_set_mac_src_act_handler
},
[RTE_FLOW_ACTION_TYPE_SET_MAC_DST] = {
- .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
- .proto_act_func = NULL
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_set_mac_dst_act_handler
},
[RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ] = {
.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
@@ -197,6 +197,14 @@ struct bnxt_ulp_rte_act_info ulp_act_info[] = {
.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
.proto_act_func = ulp_rte_port_act_handler
},
+ [RTE_FLOW_ACTION_TYPE_INDIRECT] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_action_hdlr_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_INDIRECT + 1] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ }
};
struct bnxt_ulp_rte_act_info ulp_vendor_act_info[] = {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 3566f3000b..d7450b92ff 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -24,6 +24,7 @@
#define ULP_VLAN_PRIORITY_MASK 0x700
#define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
#define ULP_UDP_PORT_VXLAN 4789
+#define ULP_UDP_PORT_VXLAN_MASK 0xFFFF
/* Utility function to skip the void items. */
static inline int32_t
@@ -190,7 +191,7 @@ bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
hdr_info = &ulp_vendor_act_info[action_item->type -
BNXT_RTE_FLOW_ACTION_TYPE_END];
} else {
- if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
+ if (action_item->type > RTE_FLOW_ACTION_TYPE_INDIRECT)
goto act_parser_error;
/* get the header information from the act info table */
hdr_info = &ulp_act_info[action_item->type];
@@ -227,7 +228,7 @@ static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
uint32_t ifindex;
- uint16_t port_id, parif;
+ uint16_t port_id, parif, svif;
uint32_t mtype;
enum bnxt_ulp_direction_type dir;
@@ -252,6 +253,14 @@ bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
}
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
parif);
+ /* Set port SVIF */
+ if (ulp_port_db_svif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_PHY_PORT_SVIF, &svif)) {
+ BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_SVIF,
+ svif);
} else {
/* Get the match port type */
mtype = ULP_COMP_FLD_IDX_RD(params,
@@ -317,10 +326,11 @@ ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
BNXT_ULP_FLOW_DIR_BITMASK_EGR);
}
- /* calculate the VF to VF flag */
+ /* Evaluate the VF to VF flag */
if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
+ ULP_BITMAP_SET(params->act_bitmap.bits,
+ BNXT_ULP_ACT_BIT_VF_TO_VF);
/* Update the decrement ttl computational fields */
if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
@@ -438,8 +448,7 @@ ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
else
svif_type = BNXT_ULP_DRV_FUNC_SVIF;
}
- ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
- &svif);
+ ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type, &svif);
svif = rte_cpu_to_be_16(svif);
hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
memcpy(hdr_field->spec, &svif, sizeof(svif));
@@ -575,8 +584,11 @@ ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
- uint16_t type, uint32_t in_flag)
+ uint16_t type, uint32_t in_flag,
+ uint32_t has_vlan, uint32_t has_vlan_mask)
{
+#define ULP_RTE_ETHER_TYPE_ROE 0xfc3d
+
if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
if (in_flag) {
ULP_BITMAP_SET(param->hdr_fp_bit.bits,
@@ -587,7 +599,7 @@ ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
BNXT_ULP_HDR_BIT_O_IPV4);
ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
}
- } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
if (in_flag) {
ULP_BITMAP_SET(param->hdr_fp_bit.bits,
BNXT_ULP_HDR_BIT_I_IPV6);
@@ -597,6 +609,29 @@ ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
BNXT_ULP_HDR_BIT_O_IPV6);
ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
}
+ } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
+ has_vlan_mask = 1;
+ has_vlan = 1;
+ } else if (type == tfp_cpu_to_be_16(ULP_RTE_ETHER_TYPE_ROE)) {
+ /* Update the hdr_bitmap with RoE */
+ ULP_BITMAP_SET(param->hdr_fp_bit.bits,
+ BNXT_ULP_HDR_BIT_O_ROE);
+ }
+
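+ /* Record the VLAN presence for the matched header direction */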
+ if (has_vlan_mask) {
+ if (in_flag) {
+ ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_HAS_VTAG,
+ has_vlan);
+ ULP_COMP_FLD_IDX_WR(param,
+ BNXT_ULP_CF_IDX_I_VLAN_NO_IGNORE,
+ 1);
+ } else {
+ ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_HAS_VTAG,
+ has_vlan);
+ ULP_COMP_FLD_IDX_WR(param,
+ BNXT_ULP_CF_IDX_O_VLAN_NO_IGNORE,
+ 1);
+ }
}
}
@@ -624,17 +659,25 @@ ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
uint32_t size;
uint16_t eth_type = 0;
uint32_t inner_flag = 0;
+ uint32_t has_vlan = 0, has_vlan_mask = 0;
/* Perform validations */
if (eth_spec) {
- /* Todo: work around to avoid multicast and broadcast addr */
- if (ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.dst_addr))
+ /* Avoid multicast and broadcast addr */
+ if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
+ ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.dst_addr))
return BNXT_TF_RC_PARSE_ERR;
- if (ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.src_addr))
+ if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
+ ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.src_addr))
return BNXT_TF_RC_PARSE_ERR;
eth_type = eth_spec->hdr.ether_type;
+ has_vlan = eth_spec->has_vlan;
+ }
+ if (eth_mask) {
+ eth_type &= eth_mask->hdr.ether_type;
+ has_vlan_mask = eth_mask->has_vlan;
}
if (ulp_rte_prsr_fld_size_validate(params, &idx,
@@ -663,7 +706,8 @@ ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
ulp_rte_prsr_fld_mask(params, &idx, size,
ulp_deference_struct(eth_spec, hdr.ether_type),
ulp_deference_struct(eth_mask, hdr.ether_type),
- ULP_PRSR_ACT_MATCH_IGNORE);
+ (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
+ ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);
/* Update the protocol hdr bitmap */
if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
@@ -684,7 +728,8 @@ ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
dmac_idx);
}
/* Update the field protocol hdr bitmap */
- ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
+ ulp_rte_l2_proto_type_update(params, eth_type, inner_flag,
+ has_vlan, has_vlan_mask);
return BNXT_TF_RC_SUCCESS;
}
@@ -837,7 +882,7 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
return BNXT_TF_RC_ERROR;
}
/* Update the field protocol hdr bitmap */
- ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
+ ulp_rte_l2_proto_type_update(params, eth_type, inner_flag, 1, 1);
return BNXT_TF_RC_SUCCESS;
}
@@ -876,22 +921,21 @@ ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
ULP_BITMAP_SET(param->hdr_bitmap.bits,
BNXT_ULP_HDR_BIT_O_ICMP);
}
- if (proto) {
- if (in_flag) {
- ULP_COMP_FLD_IDX_WR(param,
- BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
- 1);
- ULP_COMP_FLD_IDX_WR(param,
- BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
- proto);
- } else {
- ULP_COMP_FLD_IDX_WR(param,
- BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
- 1);
- ULP_COMP_FLD_IDX_WR(param,
- BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
- proto);
- }
+
+ if (in_flag) {
+ ULP_COMP_FLD_IDX_WR(param,
+ BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
+ 1);
+ ULP_COMP_FLD_IDX_WR(param,
+ BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
+ proto);
+ } else {
+ ULP_COMP_FLD_IDX_WR(param,
+ BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
+ 1);
+ ULP_COMP_FLD_IDX_WR(param,
+ BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
+ proto);
}
}
@@ -906,6 +950,7 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
uint32_t idx = 0, dip_idx = 0;
uint32_t size;
uint8_t proto = 0;
+ uint8_t proto_mask = 0;
uint32_t inner_flag = 0;
uint32_t cnt;
@@ -934,8 +979,7 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
/*
* The tos field is ignored since OVS is setting it as wild card
- * match and it is not supported. This is a work around and
- * shall be addressed in the future.
+ * match and it is not supported. An application can enable tos support.
*/
size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
ulp_rte_prsr_fld_mask(params, &idx, size,
@@ -943,7 +987,8 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
hdr.type_of_service),
ulp_deference_struct(ipv4_mask,
hdr.type_of_service),
- ULP_PRSR_ACT_MASK_IGNORE);
+ (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
+ ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);
size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
ulp_rte_prsr_fld_mask(params, &idx, size,
@@ -978,7 +1023,9 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
hdr.next_proto_id),
ulp_deference_struct(ipv4_mask,
hdr.next_proto_id),
- ULP_PRSR_ACT_MATCH_IGNORE);
+ (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
+ ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);
+
if (ipv4_spec)
proto = ipv4_spec->hdr.next_proto_id;
@@ -1020,11 +1067,14 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
* in the IPv4 spec but don't set the mask. So, consider
* the mask in the proto value calculation.
*/
- if (ipv4_mask)
+ if (ipv4_mask) {
proto &= ipv4_mask->hdr.next_proto_id;
+ proto_mask = ipv4_mask->hdr.next_proto_id;
+ }
/* Update the field protocol hdr bitmap */
- ulp_rte_l3_proto_type_update(params, proto, inner_flag);
+ if (proto_mask)
+ ulp_rte_l3_proto_type_update(params, proto, inner_flag);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
return BNXT_TF_RC_SUCCESS;
}
@@ -1038,11 +1088,12 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
uint32_t idx = 0, dip_idx = 0;
- uint32_t size;
+ uint32_t size, vtc_flow;
uint32_t ver_spec = 0, ver_mask = 0;
uint32_t tc_spec = 0, tc_mask = 0;
uint32_t lab_spec = 0, lab_mask = 0;
uint8_t proto = 0;
+ uint8_t proto_mask = 0;
uint32_t inner_flag = 0;
uint32_t cnt;
@@ -1064,22 +1115,25 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
* header fields
*/
if (ipv6_spec) {
- ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
- tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
- lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
+ vtc_flow = ntohl(ipv6_spec->hdr.vtc_flow);
+ ver_spec = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
+ tc_spec = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
+ lab_spec = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));
proto = ipv6_spec->hdr.proto;
}
if (ipv6_mask) {
- ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
- tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
- lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
+ vtc_flow = ntohl(ipv6_mask->hdr.vtc_flow);
+ ver_mask = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
+ tc_mask = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
+ lab_mask = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));
/* Some of the PMD applications may set the protocol field
* in the IPv6 spec but don't set the mask. So, consider
* the mask in proto value calculation.
*/
proto &= ipv6_mask->hdr.proto;
+ proto_mask = ipv6_mask->hdr.proto;
}
size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
@@ -1092,7 +1146,8 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
* shall be addressed in the future.
*/
ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
- ULP_PRSR_ACT_MASK_IGNORE);
+ (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
+ ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);
ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
ULP_PRSR_ACT_MASK_IGNORE);
@@ -1107,7 +1162,8 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
ulp_rte_prsr_fld_mask(params, &idx, size,
ulp_deference_struct(ipv6_spec, hdr.proto),
ulp_deference_struct(ipv6_mask, hdr.proto),
- ULP_PRSR_ACT_MATCH_IGNORE);
+ (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
+ ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);
size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
ulp_rte_prsr_fld_mask(params, &idx, size,
@@ -1144,7 +1200,8 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
}
/* Update the field protocol hdr bitmap */
- ulp_rte_l3_proto_type_update(params, proto, inner_flag);
+ if (proto_mask)
+ ulp_rte_l3_proto_type_update(params, proto, inner_flag);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
return BNXT_TF_RC_SUCCESS;
@@ -1280,7 +1337,8 @@ ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
/* Set the udp header bitmap and computed l4 header bitmaps */
if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
- ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
+ ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
+ ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
out_l4 = BNXT_ULP_HDR_BIT_I_UDP;
ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
@@ -1385,7 +1443,8 @@ ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
/* Set the udp header bitmap and computed l4 header bitmaps */
if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
- ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
+ ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
+ ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
out_l4 = BNXT_ULP_HDR_BIT_I_TCP;
ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
@@ -1403,6 +1462,7 @@ ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
uint32_t idx = 0;
+ uint16_t dport;
uint32_t size;
if (ulp_rte_prsr_fld_size_validate(params, &idx,
@@ -1442,6 +1502,15 @@ ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
/* Update the hdr_bitmap with vxlan */
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
+
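+ /* Default the outer L4 dport to the well-known VXLAN port when
+ * the flow did not match on a UDP destination port.
+ */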
+ dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
+ if (!dport) {
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
+ ULP_UDP_PORT_VXLAN);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
+ ULP_UDP_PORT_VXLAN_MASK);
+ }
+
return BNXT_TF_RC_SUCCESS;
}
@@ -1637,6 +1706,8 @@ ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
{
const struct rte_flow_action_rss *rss;
struct ulp_rte_act_prop *ap = &param->act_prop;
+ uint64_t queue_list[BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE / sizeof(uint64_t)];
+ uint32_t idx = 0, id;
if (action_item == NULL || action_item->conf == NULL) {
BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
@@ -1652,12 +1723,50 @@ ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
&rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
- if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
- BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
+ if (rss->key_len != 0 && rss->key_len != BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
+ BNXT_TF_DBG(ERR, "Parse Err: RSS key length must be 40 bytes\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ /* User may specify only key length. In that case, rss->key will be NULL.
+ * So, reject the flow if key_length is valid but rss->key is NULL.
+ * Also, copy the RSS hash key only when rss->key is valid.
+ */
+ if (rss->key_len != 0 && rss->key == NULL) {
+ BNXT_TF_DBG(ERR,
+ "Parse Err: A valid RSS key must be provided with a valid key len.\n");
+ return BNXT_TF_RC_ERROR;
+ }
+ if (rss->key)
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key, rss->key_len);
+
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM],
+ &rss->queue_num, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE_NUM);
+
+ if (rss->queue_num >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) {
+ BNXT_TF_DBG(ERR, "Parse Err: RSS queue num too big\n");
return BNXT_TF_RC_ERROR;
}
- memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
- rss->key_len);
+
+ /* Convert the queue list into a bitmap; queue id 0 maps to the
+ * MSB of the first 64-bit word.
+ */
+ memset(queue_list, 0, sizeof(queue_list));
+ for (idx = 0; idx < rss->queue_num; idx++) {
+ id = rss->queue[idx];
+ if (id >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) {
+ BNXT_TF_DBG(ERR, "Parse Err: RSS queue id too big\n");
+ return BNXT_TF_RC_ERROR;
+ }
+ if ((queue_list[id / ULP_INDEX_BITMAP_SIZE] >>
+ ((ULP_INDEX_BITMAP_SIZE - 1) -
+ (id % ULP_INDEX_BITMAP_SIZE)) & 1)) {
+ BNXT_TF_DBG(ERR, "Parse Err: duplicate queue ids\n");
+ return BNXT_TF_RC_ERROR;
+ }
+ queue_list[id / ULP_INDEX_BITMAP_SIZE] |= (1UL <<
+ ((ULP_INDEX_BITMAP_SIZE - 1) - (id % ULP_INDEX_BITMAP_SIZE)));
+ }
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE],
+ (uint8_t *)queue_list, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE);
/* set the RSS action header bit */
ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
@@ -2253,6 +2362,8 @@ ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
/* Set the action port */
ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
+ ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID,
+ ethdev_id);
return ulp_rte_parser_act_port_set(param, ifindex, act_dir);
}
@@ -2484,6 +2595,63 @@ ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
return ret;
}
+int32_t
+ulp_rte_action_hdlr_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_parser_params *params)
+{
+ const struct rte_flow_action_handle *handle;
+ struct bnxt_ulp_shared_act_info *act_info;
+ uint64_t action_bitmask;
+ uint32_t shared_action_type;
+ struct ulp_rte_act_prop *act = &params->act_prop;
+ uint64_t tmp64;
+ enum bnxt_ulp_direction_type dir, handle_dir;
+ uint32_t act_info_entries = 0;
+ int32_t ret;
+
+ handle = action_item->conf;
+
+ /* Have to use the computed direction since the params->dir_attr
+ * can be different (transfer, ingress, egress)
+ */
+ dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
+
+ /* direction of shared action must match direction of flow */
+ ret = bnxt_get_action_handle_direction(handle, &handle_dir);
+ if (ret || dir != handle_dir) {
+ BNXT_TF_DBG(ERR, "Invalid shared handle or direction\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ if (bnxt_get_action_handle_type(handle, &shared_action_type)) {
+ BNXT_TF_DBG(ERR, "Invalid shared handle\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ act_info = bnxt_ulp_shared_act_info_get(&act_info_entries);
+ if (shared_action_type >= act_info_entries || !act_info) {
+ BNXT_TF_DBG(ERR, "Invalid shared handle\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ action_bitmask = act_info[shared_action_type].act_bitmask;
+
+ /* shared actions of the same type cannot be repeated */
+ if (params->act_bitmap.bits & action_bitmask) {
+ BNXT_TF_DBG(ERR, "indirect actions cannot be repeated\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ tmp64 = tfp_cpu_to_be_64((uint64_t)bnxt_get_action_handle_index(handle));
+
+ memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE],
+ &tmp64, BNXT_ULP_ACT_PROP_SZ_SHARED_HANDLE);
+
+ ULP_BITMAP_SET(params->act_bitmap.bits, action_bitmask);
+
+ return BNXT_TF_RC_SUCCESS;
+}
+
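
ulp_rte_action_hdlr_handler resolves a previously created indirect
action: it checks that the handle's direction matches the flow's
computed direction, that the shared action type is known, and that the
same type is not referenced twice, then records the big-endian handle
index in the action properties. On the application side this
corresponds to the standard two-step rte_flow sequence sketched below
(DPDK 21.11+ indirect action API; queue list and port id are
placeholders):

#include <rte_flow.h>

static struct rte_flow *
use_shared_rss(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_action_rss rss = {
		.types = RTE_ETH_RSS_IP,
		.queue_num = 4,
		.queue = queues,
	};
	struct rte_flow_action rss_act = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};
	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_action_handle *handle;

	/* Step 1: create the shared action once... */
	handle = rte_flow_action_handle_create(port_id, &conf, &rss_act, err);
	if (handle == NULL)
		return NULL;
	/* ...step 2: reference it from any number of flows. */
	actions[0].conf = handle;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}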
/* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
int32_t
ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
@@ -2504,3 +2672,69 @@ ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
return ulp_rte_vxlan_decap_act_handler(NULL, params);
}
+
+/* Function to handle the parsing of RTE Flow action queue. */
+int32_t
+ulp_rte_queue_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_parser_params *param)
+{
+ const struct rte_flow_action_queue *q_info;
+ struct ulp_rte_act_prop *ap = &param->act_prop;
+
+ if (action_item == NULL || action_item->conf == NULL) {
+ BNXT_TF_DBG(ERR, "Parse Err: invalid queue configuration\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ q_info = action_item->conf;
+ /* Copy the queue index into the specific action properties */
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX],
+ &q_info->index, BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX);
+
+ /* set the queue action header bit */
+ ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_QUEUE);
+
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action set mac src.*/
+int32_t
+ulp_rte_set_mac_src_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_parser_params *params)
+{
+ const struct rte_flow_action_set_mac *set_mac;
+ struct ulp_rte_act_prop *act = &params->act_prop;
+
+ set_mac = action_item->conf;
+ if (set_mac) {
+ memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC],
+ set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC);
+ /* Update the act_bitmap with set mac src */
+ ULP_BITMAP_SET(params->act_bitmap.bits,
+ BNXT_ULP_ACT_BIT_SET_MAC_SRC);
+ return BNXT_TF_RC_SUCCESS;
+ }
+ BNXT_TF_DBG(ERR, "Parse Error: set mac src arg is invalid\n");
+ return BNXT_TF_RC_ERROR;
+}
+
+/* Function to handle the parsing of RTE Flow action set mac dst.*/
+int32_t
+ulp_rte_set_mac_dst_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_parser_params *params)
+{
+ const struct rte_flow_action_set_mac *set_mac;
+ struct ulp_rte_act_prop *act = &params->act_prop;
+
+ set_mac = action_item->conf;
+ if (set_mac) {
+ memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST],
+ set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST);
+ /* Update the act_bitmap with set mac dst */
+ ULP_BITMAP_SET(params->act_bitmap.bits,
+ BNXT_ULP_ACT_BIT_SET_MAC_DST);
+ return BNXT_TF_RC_SUCCESS;
+ }
+ BNXT_TF_DBG(ERR, "Parse Error: set mac dst arg is invalid\n");
+ return BNXT_TF_RC_ERROR;
+}
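
These two handlers let TruFlow offload MAC rewrite actions. A hedged
sketch of a rule combining both actions with a queue redirect; the
locally administered addresses and port id are placeholders:

#include <rte_flow.h>

static struct rte_flow *
set_mac_flow(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_set_mac mac_src = {
		.mac_addr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	struct rte_flow_action_set_mac mac_dst = {
		.mac_addr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC, .conf = &mac_src },
		{ .type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST, .conf = &mac_dst },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}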
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
index b0b2b4f33f..401ce4885d 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2014-2023 Broadcom
* All rights reserved.
*/
@@ -80,6 +80,16 @@ bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params);
+/* Function to handle the parsing of RTE Flow item PF Header. */
+int32_t
+ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
+ struct ulp_rte_parser_params *params);
+
+/* Function to handle the parsing of RTE Flow item VF Header. */
+int32_t
+ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
+ struct ulp_rte_parser_params *params);
+
/* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
@@ -238,6 +248,15 @@ ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
int32_t
ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *action_item,
struct ulp_rte_parser_params *params);
+/* Function to handle the parsing of RTE Flow action set mac src.*/
+int32_t
+ulp_rte_set_mac_src_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_parser_params *params);
+
+/* Function to handle the parsing of RTE Flow action set mac dst.*/
+int32_t
+ulp_rte_set_mac_dst_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_parser_params *params);
/* Function to handle the parsing of RTE Flow action JUMP .*/
int32_t
@@ -249,7 +268,7 @@ ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
struct ulp_rte_parser_params *params);
int32_t
-ulp_rte_shared_act_handler(const struct rte_flow_action *action_item,
+ulp_rte_action_hdlr_handler(const struct rte_flow_action *action_item,
struct ulp_rte_parser_params *params);
int32_t
@@ -259,4 +278,9 @@ ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
int32_t
ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
struct ulp_rte_parser_params *params);
+
+/* Function to handle the parsing of RTE Flow action queue. */
+int32_t
+ulp_rte_queue_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_parser_params *param);
#endif /* _ULP_RTE_PARSER_H_ */
--
2.39.2 (Apple Git-143)