From: Junlong Wang <wang.junlong1@zte.com.cn>
To: stephen@networkplumber.org
Cc: dev@dpdk.org, Junlong Wang <wang.junlong1@zte.com.cn>
Subject: [PATCH v1 16/16] net/zxdh: provide meter ops implementations
Date: Thu, 13 Feb 2025 14:41:32 +0800 [thread overview]
Message-ID: <20250213064134.88166-17-wang.junlong1@zte.com.cn> (raw)
In-Reply-To: <20250213064134.88166-1-wang.junlong1@zte.com.cn>
Provide meter (rte_mtr) ops implementations: meter profile and policy
management, meter object create/destroy, and drop statistics query.
VF requests are relayed to the PF over the PF/VF message channel.
Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
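A minimal application-side usage sketch of the new meter ops through the
generic rte_mtr API (port number, IDs and rates below are illustrative only,
they are not taken from this patch):

    /* Sketch only: assumes an already configured and started port. */
    #include <rte_flow.h>
    #include <rte_mtr.h>

    static int
    setup_ingress_meter(uint16_t port_id)
    {
        struct rte_mtr_error err;
        struct rte_mtr_meter_profile prof = {
            .alg = RTE_MTR_SRTCM_RFC2697,
            .packet_mode = 0,                 /* byte mode */
            .srtcm_rfc2697 = {
                .cir = 125000000,             /* ~1 Gbps, in bytes/s */
                .cbs = 64 * 1024,
                .ebs = 64 * 1024,
            },
        };
        /* This driver only accepts a drop action for the RED color. */
        struct rte_flow_action red_acts[] = {
            { .type = RTE_FLOW_ACTION_TYPE_DROP },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_mtr_meter_policy_params policy = {
            .actions = { [RTE_COLOR_RED] = red_acts },
        };
        struct rte_mtr_params params = {
            .meter_profile_id = 1,
            .meter_policy_id = 1,
            .meter_enable = 1,
            .stats_mask = RTE_MTR_STATS_N_PKTS_DROPPED |
                          RTE_MTR_STATS_N_BYTES_DROPPED,
        };

        if (rte_mtr_meter_profile_add(port_id, 1, &prof, &err) ||
            rte_mtr_meter_policy_add(port_id, 1, &policy, &err) ||
            rte_mtr_create(port_id, 1, &params, 0 /* not shared */, &err))
            return -1;
        return 0;
    }
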
doc/guides/nics/features/zxdh.ini | 8 +
doc/guides/nics/zxdh.rst | 5 +
drivers/net/zxdh/meson.build | 1 +
drivers/net/zxdh/zxdh_ethdev.c | 81 +-
drivers/net/zxdh/zxdh_ethdev.h | 14 +-
drivers/net/zxdh/zxdh_ethdev_ops.c | 4 +
drivers/net/zxdh/zxdh_ethdev_ops.h | 1 +
drivers/net/zxdh/zxdh_msg.c | 196 +++++
drivers/net/zxdh/zxdh_msg.h | 48 ++
drivers/net/zxdh/zxdh_mtr.c | 1223 ++++++++++++++++++++++++++++
drivers/net/zxdh/zxdh_mtr.h | 114 +++
drivers/net/zxdh/zxdh_np.c | 465 +++++++++++
drivers/net/zxdh/zxdh_np.h | 222 +++++
drivers/net/zxdh/zxdh_tables.h | 3 +
14 files changed, 2383 insertions(+), 2 deletions(-)
create mode 100644 drivers/net/zxdh/zxdh_mtr.c
create mode 100644 drivers/net/zxdh/zxdh_mtr.h
diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
index 3561e31666..9e31817b5e 100644
--- a/doc/guides/nics/features/zxdh.ini
+++ b/doc/guides/nics/features/zxdh.ini
@@ -24,4 +24,12 @@ RSS reta update = Y
Inner RSS = Y
Basic stats = Y
Stats per queue = Y
+Extended stats = Y
MTU update = Y
+FW version = Y
+Module EEPROM dump = Y
+L3 checksum offload = Y
+Inner L3 checksum = Y
+Inner L4 checksum = Y
+LRO = Y
+TSO = Y
diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
index 30179c4e6f..26dd527196 100644
--- a/doc/guides/nics/zxdh.rst
+++ b/doc/guides/nics/zxdh.rst
@@ -36,6 +36,11 @@ Features of the ZXDH PMD are:
- Port hardware statistics
- MTU update
- Jumbo frames
+- Inner and Outer Checksum offload
+- Hardware LRO
+- Hardware TSO for generic IP or UDP tunnel, including VXLAN
+- Extended Statistics query
+- Ingress meter support
Driver compilation and testing
diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
index 48f8f5e1ee..a48a0d43c2 100644
--- a/drivers/net/zxdh/meson.build
+++ b/drivers/net/zxdh/meson.build
@@ -23,4 +23,5 @@ sources = files(
'zxdh_tables.c',
'zxdh_rxtx.c',
'zxdh_ethdev_ops.c',
+ 'zxdh_mtr.c',
)
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 5546b6bfc3..2c24e99a77 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -24,6 +24,7 @@ const char *ZXDH_PMD_SHARED_DATA_MZ = "zxdh_pmd_shared_data";
rte_spinlock_t zxdh_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
struct zxdh_dev_shared_data g_dev_sd[ZXDH_SLOT_MAX];
struct zxdh_net_hdr_dl g_net_hdr_dl[RTE_MAX_ETHPORTS];
+struct zxdh_mtr_res g_mtr_res;
#define ZXDH_INVALID_DTBQUE 0xFFFF
#define ZXDH_INVALID_SLOT_IDX 0xFFFF
@@ -1408,6 +1409,7 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {
.get_module_info = zxdh_dev_get_module_info,
.get_module_eeprom = zxdh_dev_get_module_eeprom,
.dev_supported_ptypes_get = zxdh_dev_supported_ptypes_get,
+ .mtr_ops_get = zxdh_meter_ops_get,
};
static int32_t
@@ -1637,6 +1639,72 @@ zxdh_init_shared_data(void)
return ret;
}
+static void
+zxdh_free_sh_res(void)
+{
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rte_spinlock_lock(&zxdh_shared_data_lock);
+ if (zxdh_shared_data != NULL && zxdh_shared_data->init_done &&
+ (--zxdh_shared_data->dev_refcnt == 0)) {
+ rte_mempool_free(zxdh_shared_data->mtr_mp);
+ rte_mempool_free(zxdh_shared_data->mtr_profile_mp);
+ rte_mempool_free(zxdh_shared_data->mtr_policy_mp);
+ }
+ rte_spinlock_unlock(&zxdh_shared_data_lock);
+ }
+}
+
+static int
+zxdh_init_sh_res(struct zxdh_shared_data *sd)
+{
+ const char *MZ_ZXDH_MTR_MP = "zxdh_mtr_mempool";
+ const char *MZ_ZXDH_MTR_PROFILE_MP = "zxdh_mtr_profile_mempool";
+ const char *MZ_ZXDH_MTR_POLICY_MP = "zxdh_mtr_policy_mempool";
+ struct rte_mempool *flow_mp = NULL;
+ struct rte_mempool *mtr_mp = NULL;
+ struct rte_mempool *mtr_profile_mp = NULL;
+ struct rte_mempool *mtr_policy_mp = NULL;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ mtr_mp = rte_mempool_create(MZ_ZXDH_MTR_MP, ZXDH_MAX_MTR_NUM,
+ sizeof(struct zxdh_mtr_object), 64, 0,
+ NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (mtr_mp == NULL) {
+ PMD_DRV_LOG(ERR, "Cannot allocate zxdh mtr mempool");
+ goto error;
+ }
+ mtr_profile_mp = rte_mempool_create(MZ_ZXDH_MTR_PROFILE_MP,
+ MAX_MTR_PROFILE_NUM, sizeof(struct zxdh_meter_profile),
+ 64, 0, NULL, NULL, NULL,
+ NULL, SOCKET_ID_ANY, 0);
+ if (mtr_profile_mp == NULL) {
+ PMD_DRV_LOG(ERR, "Cannot allocate zxdh mtr profile mempool");
+ goto error;
+ }
+ mtr_policy_mp = rte_mempool_create(MZ_ZXDH_MTR_POLICY_MP,
+ ZXDH_MAX_POLICY_NUM, sizeof(struct zxdh_meter_policy),
+ 64, 0, NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (mtr_policy_mp == NULL) {
+ PMD_DRV_LOG(ERR, "Cannot allocate zxdh mtr policy mempool");
+ goto error;
+ }
+ sd->mtr_mp = mtr_mp;
+ sd->mtr_profile_mp = mtr_profile_mp;
+ sd->mtr_policy_mp = mtr_policy_mp;
+ TAILQ_INIT(&zxdh_shared_data->meter_profile_list);
+ TAILQ_INIT(&zxdh_shared_data->mtr_list);
+ TAILQ_INIT(&zxdh_shared_data->mtr_policy_list);
+ }
+ return 0;
+
+error:
+ rte_mempool_free(mtr_policy_mp);
+ rte_mempool_free(mtr_profile_mp);
+ rte_mempool_free(mtr_mp);
+ rte_mempool_free(flow_mp);
+ return -rte_errno;
+}
+
static int
zxdh_init_once(struct rte_eth_dev *eth_dev)
{
@@ -1664,8 +1732,16 @@ zxdh_init_once(struct rte_eth_dev *eth_dev)
goto out;
}
/* RTE_PROC_PRIMARY */
- if (!sd->init_done)
+ if (!sd->init_done) {
+ /* shared struct and resource init */
+ ret = zxdh_init_sh_res(sd);
+ if (ret != 0)
+ goto out;
+ memset(&g_mtr_res, 0, sizeof(g_mtr_res));
+ rte_spinlock_init(&g_mtr_res.hw_plcr_res_lock);
sd->init_done = true;
+ }
+
sd->dev_refcnt++;
out:
@@ -1879,6 +1955,9 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
zxdh_np_uninit(eth_dev);
zxdh_bar_msg_chan_exit();
zxdh_priv_res_free(hw);
+ zxdh_free_sh_res();
+ rte_free(hw->dev_sd);
+ hw->dev_sd = NULL;
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
return ret;
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 87c3b5f022..6ad714d29a 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -10,6 +10,8 @@
#include <rte_interrupts.h>
#include <eal_interrupts.h>
+#include "zxdh_mtr.h"
+
/* ZXDH PCI vendor/device ID. */
#define ZXDH_PCI_VENDOR_ID 0x1cf2
@@ -113,7 +115,10 @@ struct zxdh_hw {
uint8_t use_msix;
uint8_t duplex;
- uint8_t is_pf;
+ uint8_t is_pf : 1,
+ rsv : 1,
+ i_mtr_en : 1,
+ e_mtr_en : 1;
uint8_t msg_chan_init;
uint8_t phyport;
uint8_t panel_id;
@@ -157,6 +162,13 @@ struct zxdh_shared_data {
int32_t np_init_done;
uint32_t dev_refcnt;
struct zxdh_dtb_shared_data *dtb_data;
+
+ struct rte_mempool *mtr_mp;
+ struct rte_mempool *mtr_profile_mp;
+ struct rte_mempool *mtr_policy_mp;
+ struct zxdh_mtr_profile_list meter_profile_list;
+ struct zxdh_mtr_list mtr_list;
+ struct zxdh_mtr_policy_list mtr_policy_list;
};
struct zxdh_dev_shared_data {
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index 3e88860765..512e1cce2e 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -14,6 +14,7 @@
#include "zxdh_rxtx.h"
#include "zxdh_np.h"
#include "zxdh_queue.h"
+#include "zxdh_mtr.h"
#define ZXDH_VLAN_FILTER_GROUPS 64
#define ZXDH_INVALID_LOGIC_QID 0xFFFFU
@@ -1624,6 +1625,9 @@ zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->ierrors = vqm_stats.rx_error + mac_stats.rx_error + np_stats.rx_mtu_drop_pkts;
stats->oerrors = vqm_stats.tx_error + mac_stats.tx_error + np_stats.tx_mtu_drop_pkts;
+ if (hw->i_mtr_en || hw->e_mtr_en)
+ stats->imissed += np_stats.rx_mtr_drop_pkts;
+
stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
for (i = 0; (i < dev->data->nb_rx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[i];
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h
index 08b9e3341b..ead571067c 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.h
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.h
@@ -108,5 +108,6 @@ int zxdh_dev_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
int zxdh_dev_get_module_info(struct rte_eth_dev *dev,
struct rte_eth_dev_module_info *modinfo);
int zxdh_dev_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info);
+int zxdh_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
#endif /* ZXDH_ETHDEV_OPS_H */
diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c
index 0790cb2291..00a8db4a69 100644
--- a/drivers/net/zxdh/zxdh_msg.c
+++ b/drivers/net/zxdh/zxdh_msg.c
@@ -10,6 +10,7 @@
#include <rte_cycles.h>
#include <inttypes.h>
#include <rte_malloc.h>
+#include "rte_mtr_driver.h"
#include "zxdh_ethdev.h"
#include "zxdh_logs.h"
@@ -1693,6 +1694,12 @@ zxdh_vf_port_attr_set(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
case ZXDH_PORT_LRO_OFFLOAD_FLAG:
port_attr.lro_offload = attr_msg->value;
break;
+ case ZXDH_PORT_EGRESS_METER_EN_OFF_FLAG:
+ port_attr.egress_meter_enable = attr_msg->value;
+ break;
+ case ZXDH_PORT_INGRESS_METER_EN_OFF_FLAG:
+ port_attr.ingress_meter_mode = attr_msg->value;
+ break;
default:
PMD_DRV_LOG(ERR, "unsupport attr 0x%x set", attr_msg->mode);
return -1;
@@ -1846,6 +1853,190 @@ zxdh_vf_np_stats_update(struct zxdh_hw *pf_hw, uint16_t vport,
return 0;
}
+static int
+zxdh_vf_mtr_hw_stats_get(struct zxdh_hw *pf_hw,
+ uint16_t vport, void *cfg_data,
+ struct zxdh_msg_reply_body *res_info,
+ uint16_t *res_len)
+{
+ struct zxdh_mtr_stats_query *zxdh_mtr_stats_query =
+ (struct zxdh_mtr_stats_query *)cfg_data;
+ union zxdh_virport_num v_port = {.vport = vport};
+ int ret = 0;
+
+ uint32_t stat_baseaddr = zxdh_mtr_stats_query->direction ==
+ ZXDH_EGRESS ?
+ ZXDH_MTR_STATS_EGRESS_BASE : ZXDH_MTR_STATS_INGRESS_BASE;
+ uint32_t idx = zxdh_vport_to_vfid(v_port) + stat_baseaddr;
+
+ if (!res_len || !res_info) {
+ PMD_DRV_LOG(ERR, "get stats: invalid input params");
+ return -1;
+ }
+ res_info->flag = ZXDH_REPS_FAIL;
+ ret = zxdh_np_dtb_stats_get(pf_hw->dev_id, pf_hw->dev_sd->dtb_sd.queueid,
+ 1, idx, (uint32_t *)&res_info->hw_mtr_stats);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "get dir %d stats failed", zxdh_mtr_stats_query->direction);
+ return ret;
+ }
+ res_info->flag = ZXDH_REPS_SUCC;
+ *res_len = sizeof(struct zxdh_hw_mtr_stats);
+ return 0;
+}
+
+static int
+zxdh_vf_mtr_hw_profile_add(struct zxdh_hw *pf_hw,
+ uint16_t vport,
+ void *cfg_data,
+ struct zxdh_msg_reply_body *res_info,
+ uint16_t *res_len)
+{
+ if (!cfg_data || !res_len || !res_info) {
+ PMD_DRV_LOG(ERR, "get profile id: invalid input params");
+ return -1;
+ }
+ struct rte_mtr_error error = {0};
+ int ret = 0;
+ uint64_t profile_id = HW_PROFILE_MAX;
+
+ struct zxdh_plcr_profile_add *zxdh_plcr_profile_add =
+ (struct zxdh_plcr_profile_add *)cfg_data;
+
+ res_info->flag = ZXDH_REPS_FAIL;
+ *res_len = sizeof(struct zxdh_mtr_profile_info);
+ ret = zxdh_hw_profile_alloc_direct(pf_hw->eth_dev,
+ zxdh_plcr_profile_add->car_type,
+ &profile_id, &error);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "pf 0x%x for vf 0x%x alloc hw profile failed",
+ pf_hw->vport.vport,
+ vport
+ );
+ return -1;
+ }
+ zxdh_hw_profile_ref(profile_id);
+ res_info->mtr_profile_info.profile_id = profile_id;
+ res_info->flag = ZXDH_REPS_SUCC;
+
+ return 0;
+}
+
+static int
+zxdh_vf_mtr_hw_profile_del(struct zxdh_hw *pf_hw,
+ uint16_t vport,
+ void *cfg_data,
+ struct zxdh_msg_reply_body *res_info,
+ uint16_t *res_len)
+{
+ if (!cfg_data || !res_len || !res_info) {
+ PMD_DRV_LOG(ERR, "del profile id: invalid input params");
+ return -1;
+ }
+
+ res_info->flag = ZXDH_REPS_FAIL;
+ *res_len = 0;
+ struct zxdh_plcr_profile_free *mtr_profile_free = (struct zxdh_plcr_profile_free *)cfg_data;
+ uint64_t profile_id = mtr_profile_free->profile_id;
+ struct rte_mtr_error error = {0};
+ int ret;
+
+ if (profile_id >= HW_PROFILE_MAX) {
+ PMD_DRV_LOG(ERR, "del profile id: invalid input params");
+ return -rte_mtr_error_set(&error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+ "Meter offload del profile failed, profile id invalid");
+ }
+
+ ret = zxdh_hw_profile_unref(pf_hw->eth_dev, mtr_profile_free->car_type, profile_id, &error);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ " del hw vport %d profile %d failed. code:%d",
+ vport,
+ mtr_profile_free->profile_id,
+ ret
+ );
+ return -rte_mtr_error_set(&error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+ "Meter offload del profile failed ");
+ }
+ res_info->flag = ZXDH_REPS_SUCC;
+ return 0;
+}
+
+static int
+zxdh_vf_mtr_hw_plcrflow_cfg(struct zxdh_hw *pf_hw,
+ uint16_t vport,
+ void *cfg_data,
+ struct zxdh_msg_reply_body *res_info,
+ uint16_t *res_len)
+{
+ int ret = 0;
+
+ if (!cfg_data || !res_info || !res_len) {
+ PMD_DRV_LOG(ERR, "(vport %d) flow bind failed: invalid input params", vport);
+ return -1;
+ }
+ struct rte_mtr_error error = {0};
+ struct zxdh_plcr_flow_cfg *zxdh_plcr_flow_cfg = (struct zxdh_plcr_flow_cfg *)cfg_data;
+
+ res_info->flag = ZXDH_REPS_FAIL;
+ *res_len = 0;
+ ret = zxdh_np_stat_car_queue_cfg_set(pf_hw->dev_id,
+ zxdh_plcr_flow_cfg->car_type,
+ zxdh_plcr_flow_cfg->flow_id,
+ zxdh_plcr_flow_cfg->drop_flag,
+ zxdh_plcr_flow_cfg->plcr_en,
+ (uint64_t)zxdh_plcr_flow_cfg->profile_id);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "zxdh_np_stat_car_queue_cfg_set failed, flow id %d profile id %d. code:%d",
+ zxdh_plcr_flow_cfg->flow_id,
+ zxdh_plcr_flow_cfg->profile_id,
+ ret
+ );
+ return -rte_mtr_error_set(&error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+ NULL, "Failed to bind plcr flow.");
+ }
+ res_info->flag = ZXDH_REPS_SUCC;
+ return 0;
+}
+
+static int
+zxdh_vf_mtr_hw_profile_cfg(struct zxdh_hw *pf_hw __rte_unused,
+ uint16_t vport,
+ void *cfg_data,
+ struct zxdh_msg_reply_body *res_info,
+ uint16_t *res_len)
+{
+ int ret = 0;
+
+ if (!cfg_data || !res_info || !res_len) {
+ PMD_DRV_LOG(ERR, "cfg profile: invalid input params");
+ return -1;
+ }
+ res_info->flag = ZXDH_REPS_FAIL;
+ *res_len = 0;
+ struct rte_mtr_error error = {0};
+ struct zxdh_plcr_profile_cfg *zxdh_plcr_profile_cfg =
+ (struct zxdh_plcr_profile_cfg *)cfg_data;
+ union zxdh_offload_profile_cfg *plcr_param = &zxdh_plcr_profile_cfg->plcr_param;
+
+ ret = zxdh_np_car_profile_cfg_set(vport,
+ zxdh_plcr_profile_cfg->car_type,
+ zxdh_plcr_profile_cfg->packet_mode,
+ zxdh_plcr_profile_cfg->hw_profile_id,
+ plcr_param);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "(vport %d) config hw profile failed", vport);
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+ "Meter offload cfg profile failed");
+ }
+ res_info->flag = ZXDH_REPS_SUCC;
+ return 0;
+}
+
zxdh_msg_process_callback zxdh_proc_cb[] = {
[ZXDH_NULL] = NULL,
[ZXDH_VF_PORT_INIT] = zxdh_vf_port_init,
@@ -1863,6 +2054,11 @@ zxdh_msg_process_callback zxdh_proc_cb[] = {
[ZXDH_VLAN_OFFLOAD] = zxdh_vf_set_vlan_offload,
[ZXDH_PORT_ATTRS_SET] = zxdh_vf_port_attr_set,
[ZXDH_GET_NP_STATS] = zxdh_vf_np_stats_update,
+ [ZXDH_PORT_METER_STAT_GET] = zxdh_vf_mtr_hw_stats_get,
+ [ZXDH_PLCR_CAR_PROFILE_ID_ADD] = zxdh_vf_mtr_hw_profile_add,
+ [ZXDH_PLCR_CAR_PROFILE_ID_DELETE] = zxdh_vf_mtr_hw_profile_del,
+ [ZXDH_PLCR_CAR_QUEUE_CFG_SET] = zxdh_vf_mtr_hw_plcrflow_cfg,
+ [ZXDH_PLCR_CAR_PROFILE_CFG_SET] = zxdh_vf_mtr_hw_profile_cfg,
};
static inline int
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index b3e202c026..2606f36d21 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -10,6 +10,7 @@
#include <ethdev_driver.h>
#include "zxdh_ethdev_ops.h"
+#include "zxdh_mtr.h"
#define ZXDH_BAR0_INDEX 0
#define ZXDH_CTRLCH_OFFSET (0x2000)
@@ -230,6 +231,11 @@ enum zxdh_msg_type {
ZXDH_PORT_PROMISC_SET = 26,
ZXDH_GET_NP_STATS = 31,
+ ZXDH_PLCR_CAR_PROFILE_ID_ADD = 36,
+ ZXDH_PLCR_CAR_PROFILE_ID_DELETE = 37,
+ ZXDH_PLCR_CAR_PROFILE_CFG_SET,
+ ZXDH_PLCR_CAR_QUEUE_CFG_SET = 40,
+ ZXDH_PORT_METER_STAT_GET = 42,
ZXDH_MSG_TYPE_END,
};
@@ -373,6 +379,10 @@ struct __rte_packed_begin zxdh_rss_hf {
uint32_t rss_hf;
} __rte_packed_end;
+struct zxdh_mtr_profile_info {
+ uint64_t profile_id;
+};
+
struct __rte_packed_begin zxdh_msg_reply_body {
enum zxdh_reps_flag flag;
union __rte_packed_begin {
@@ -386,6 +396,8 @@ struct __rte_packed_begin zxdh_msg_reply_body {
struct zxdh_np_stats_updata_msg np_stats_query;
struct zxdh_flash_msg flash_msg;
struct zxdh_mac_module_eeprom_msg module_eeprom_msg;
+ struct zxdh_mtr_profile_info mtr_profile_info;
+ struct zxdh_mtr_stats hw_mtr_stats;
} __rte_packed_end;
} __rte_packed_end;
@@ -443,6 +455,37 @@ struct zxdh_rss_enable {
uint8_t enable;
};
+struct __rte_packed_begin zxdh_plcr_profile_add {
+ uint8_t car_type; /* 0: carA; 1: carB; 2: carC */
+} __rte_packed_end;
+
+struct __rte_packed_begin zxdh_mtr_stats_query {
+ uint8_t direction;
+ uint8_t is_clr;
+} __rte_packed_end;
+
+struct __rte_packed_begin zxdh_plcr_profile_cfg {
+ uint8_t car_type; /* 0: carA; 1: carB; 2: carC */
+ uint8_t packet_mode; /* 0: bps; 1: pps */
+ uint16_t hw_profile_id;
+ union zxdh_offload_profile_cfg plcr_param;
+} __rte_packed_end;
+
+struct __rte_packed_begin zxdh_plcr_flow_cfg {
+ uint8_t car_type; /* 0:carA; 1:carB; 2:carC */
+ uint8_t drop_flag; /* default */
+ uint8_t plcr_en; /* 1:bind, 0:unbind */
+ uint8_t rsv;
+ uint16_t flow_id;
+ uint16_t profile_id;
+} __rte_packed_end;
+
+struct __rte_packed_begin zxdh_plcr_profile_free {
+ uint8_t car_type;
+ uint8_t rsv;
+ uint16_t profile_id;
+} __rte_packed_end;
+
struct __rte_packed_begin zxdh_agent_msg_head {
enum zxdh_agent_msg_type msg_type;
uint8_t panel_id;
@@ -473,6 +516,11 @@ struct __rte_packed_begin zxdh_msg_info {
struct zxdh_rss_hf rss_hf;
struct zxdh_np_stats_updata_msg np_stats_query;
struct zxdh_mac_module_eeprom_msg module_eeprom_msg;
+ struct zxdh_plcr_profile_add zxdh_plcr_profile_add;
+ struct zxdh_plcr_profile_free zxdh_plcr_profile_free;
+ struct zxdh_plcr_profile_cfg zxdh_plcr_profile_cfg;
+ struct zxdh_plcr_flow_cfg zxdh_plcr_flow_cfg;
+ struct zxdh_mtr_stats_query zxdh_mtr_stats_query;
} __rte_packed_end data;
} __rte_packed_end;
diff --git a/drivers/net/zxdh/zxdh_mtr.c b/drivers/net/zxdh/zxdh_mtr.c
new file mode 100644
index 0000000000..09e601d336
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_mtr.c
@@ -0,0 +1,1223 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <bus_pci_driver.h>
+#include <rte_ethdev.h>
+#include <rte_mtr_driver.h>
+#include <rte_mempool.h>
+
+#include "zxdh_logs.h"
+#include "zxdh_mtr.h"
+#include "zxdh_msg.h"
+#include "zxdh_ethdev.h"
+#include "zxdh_tables.h"
+
+#define ZXDH_SHARE_FLOW_MAX 2048
+#define ZXDH_HW_PROFILE_MAX 512
+#define ZXDH_MAX_MTR_PROFILE_NUM ZXDH_HW_PROFILE_MAX
+#define ZXDH_PORT_MTR_FID_BASE 8192
+
+/* srTCM/trTCM metering parameter limits.
+ * BPS: 61 Kbps ~ 400 Gbps, in units of 64 Kbps; CBS/EBS/PBS max bucket depth 128 MB.
+ * PPS: 1 pps ~ 600 Mpps.
+ */
+#define ZXDH_SRTCM_CIR_MIN_BPS (61 * (1ULL << 10))
+#define ZXDH_SRTCM_CIR_MAX_BPS (400 * (1ULL << 30))
+#define ZXDH_SRTCM_EBS_MAX_B (128 * (1ULL << 20))
+#define ZXDH_SRTCM_CBS_MAX_B (128 * (1ULL << 20))
+#define ZXDH_TRTCM_PBS_MAX_B (128 * (1ULL << 20))
+#define ZXDH_TRTCM_PIR_MAX_BPS (400 * (1ULL << 30))
+#define ZXDH_TRTCM_PIR_MIN_BPS (61 * (1ULL << 10))
+
+#define ZXDH_SRTCM_CIR_MIN_PPS (1)
+#define ZXDH_SRTCM_CIR_MAX_PPS (200 * (1ULL << 20))
+#define ZXDH_SRTCM_CBS_MAX_P (8192)
+#define ZXDH_SRTCM_EBS_MAX_P (8192)
+#define ZXDH_TRTCM_PBS_MAX_P (8192)
+#define ZXDH_TRTCM_PIR_MIN_PPS (1)
+#define ZXDH_TRTCM_PIR_MAX_PPS (200 * (1ULL << 20))
+
+#define ZXDH_MP_ALLOC_OBJ_FUNC(mp, obj) rte_mempool_get(mp, (void **)&(obj))
+#define ZXDH_MP_FREE_OBJ_FUNC(mp, obj) rte_mempool_put(mp, obj)
+
+#define ZXDH_VFUNC_ACTIVE_BIT 11
+#define ZXDH_VFUNC_NUM_MASK 0xff
+#define ZXDH_GET_OWNER_PF_VPORT(vport) \
+ (((vport) & ~(ZXDH_VFUNC_NUM_MASK)) & (~(1 << ZXDH_VFUNC_ACTIVE_BIT)))
+
+enum ZXDH_PLCR_CD {
+ ZXDH_PLCR_CD_SRTCM = 0,
+ ZXDH_PLCR_CD_TRTCM,
+ ZXDH_PLCR_CD_MEF101,
+};
+enum ZXDH_PLCR_CM {
+ ZXDH_PLCR_CM_BLIND = 0,
+ ZXDH_PLCR_CM_AWARE,
+};
+enum ZXDH_PLCR_CF {
+ ZXDH_PLCR_CF_UNOVERFLOW = 0,
+ ZXDH_PLCR_CF_OVERFLOW,
+};
+
+int
+zxdh_hw_profile_ref(uint16_t hw_profile_id)
+{
+ if (hw_profile_id >= HW_PROFILE_MAX)
+ return -1;
+
+ rte_spinlock_lock(&g_mtr_res.hw_plcr_res_lock);
+ g_mtr_res.hw_profile_refcnt[hw_profile_id]++;
+ rte_spinlock_unlock(&g_mtr_res.hw_plcr_res_lock);
+ return 0;
+}
+
+static struct zxdh_meter_policy *
+zxdh_mtr_policy_find_by_id(struct zxdh_mtr_policy_list *mtr_policy_list,
+ uint16_t policy_id, uint16_t dpdk_portid)
+{
+ struct zxdh_meter_policy *mtr_policy = NULL;
+
+ TAILQ_FOREACH(mtr_policy, mtr_policy_list, next) {
+ if (policy_id == mtr_policy->policy_id &&
+ dpdk_portid == mtr_policy->dpdk_port_id)
+ return mtr_policy;
+ }
+ return NULL;
+}
+
+static int
+zxdh_policy_validate_actions(const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_mtr_error *error)
+{
+ if (!actions[RTE_COLOR_RED] || actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
+ return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+ "Red color only supports drop action.");
+ return 0;
+}
+
+static int
+mtr_hw_stats_get(struct zxdh_hw *hw, uint8_t direction, struct zxdh_hw_mtr_stats *hw_mtr_stats)
+{
+ union zxdh_virport_num v_port = hw->vport;
+ uint32_t stat_baseaddr = (direction == ZXDH_EGRESS)
+ ? ZXDH_MTR_STATS_EGRESS_BASE
+ : ZXDH_MTR_STATS_INGRESS_BASE;
+ uint32_t idx = zxdh_vport_to_vfid(v_port) + stat_baseaddr;
+ struct zxdh_dtb_shared_data *dtb_sd = &hw->dev_sd->dtb_sd;
+
+ int ret = zxdh_np_dtb_stats_get(hw->dev_id,
+ dtb_sd->queueid, ZXDH_STAT_128_MODE,
+ idx, (uint32_t *)hw_mtr_stats);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "get vport 0x%x (vfid 0x%x) dir %u stats failed",
+ v_port.vport,
+ hw->vfid,
+ direction);
+ return ret;
+ }
+ PMD_DRV_LOG(INFO, "get vport 0x%x (vfid 0x%x) dir %u stats",
+ v_port.vport,
+ hw->vfid,
+ direction);
+ return 0;
+}
+
+static int
+zxdh_mtr_stats_get(struct rte_eth_dev *dev, int dir, struct zxdh_mtr_stats *mtr_stats)
+{
+ struct zxdh_hw_mtr_stats hw_mtr_stat = {0};
+ struct zxdh_hw *hw = dev->data->dev_private;
+ int ret = mtr_hw_stats_get(hw, dir, &hw_mtr_stat);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "port %u dir %u get mtr stats failed", hw->vport.vport, dir);
+ return ret;
+ }
+ mtr_stats->n_bytes_dropped =
+ (uint64_t)(rte_le_to_cpu_32(hw_mtr_stat.n_bytes_dropped_hi)) << 32 |
+ rte_le_to_cpu_32(hw_mtr_stat.n_bytes_dropped_lo);
+ mtr_stats->n_pkts_dropped =
+ (uint64_t)(rte_le_to_cpu_32(hw_mtr_stat.n_pkts_dropped_hi)) << 32 |
+ rte_le_to_cpu_32(hw_mtr_stat.n_pkts_dropped_lo);
+
+ return 0;
+}
+
+static int
+zxdh_meter_cap_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_mtr_capabilities *cap,
+ struct rte_mtr_error *error __rte_unused)
+{
+ struct rte_mtr_capabilities capa = {
+ .n_max = ZXDH_MAX_MTR_NUM,
+ .n_shared_max = ZXDH_SHARE_FLOW_MAX,
+ .meter_srtcm_rfc2697_n_max = ZXDH_MAX_MTR_PROFILE_NUM,
+ .meter_trtcm_rfc2698_n_max = ZXDH_MAX_MTR_PROFILE_NUM,
+ .color_aware_srtcm_rfc2697_supported = 1,
+ .color_aware_trtcm_rfc2698_supported = 1,
+ .meter_rate_max = ZXDH_SRTCM_CIR_MAX_BPS,
+ .meter_policy_n_max = ZXDH_MAX_POLICY_NUM,
+ .srtcm_rfc2697_byte_mode_supported = 1,
+ .srtcm_rfc2697_packet_mode_supported = 1,
+ .trtcm_rfc2698_byte_mode_supported = 1,
+ .trtcm_rfc2698_packet_mode_supported = 1,
+ .stats_mask = RTE_MTR_STATS_N_PKTS_DROPPED | RTE_MTR_STATS_N_BYTES_DROPPED,
+ };
+
+ memcpy(cap, &capa, sizeof(capa));
+ return 0;
+}
+
+static int
+zxdh_mtr_profile_validate(uint32_t meter_profile_id,
+ struct rte_mtr_meter_profile *profile,
+ struct rte_mtr_error *error)
+{
+ uint64_t cir_min, cir_max, cbs_max, ebs_max, pir_min, pir_max, pbs_max;
+
+ if (profile == NULL || meter_profile_id >= ZXDH_MAX_MTR_PROFILE_NUM) {
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+ "Meter profile is null or profile id invalid");
+ }
+
+ if (profile->packet_mode == 0) {
+ cir_min = ZXDH_SRTCM_CIR_MIN_BPS / 8;
+ cir_max = ZXDH_SRTCM_CIR_MAX_BPS / 8;
+ cbs_max = ZXDH_SRTCM_CBS_MAX_B;
+ ebs_max = ZXDH_SRTCM_EBS_MAX_B;
+ pir_min = ZXDH_TRTCM_PIR_MIN_BPS / 8;
+ pir_max = ZXDH_TRTCM_PIR_MAX_BPS / 8;
+ pbs_max = ZXDH_TRTCM_PBS_MAX_B;
+ } else {
+ cir_min = ZXDH_SRTCM_CIR_MIN_PPS;
+ cir_max = ZXDH_SRTCM_CIR_MAX_PPS;
+ cbs_max = ZXDH_SRTCM_CBS_MAX_P;
+ ebs_max = ZXDH_SRTCM_EBS_MAX_P;
+ pir_min = ZXDH_TRTCM_PIR_MIN_PPS;
+ pir_max = ZXDH_TRTCM_PIR_MAX_PPS;
+ pbs_max = ZXDH_TRTCM_PBS_MAX_P;
+ }
+ if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+ if (profile->srtcm_rfc2697.cir >= cir_min &&
+ profile->srtcm_rfc2697.cir < cir_max &&
+ profile->srtcm_rfc2697.cbs < cbs_max &&
+ profile->srtcm_rfc2697.cbs > 0 &&
+ profile->srtcm_rfc2697.ebs > 0 &&
+ profile->srtcm_rfc2697.ebs < ebs_max) {
+ goto check_exist;
+ } else {
+ return -rte_mtr_error_set
+ (error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE,
+ NULL,
+ "Invalid metering parameters");
+ }
+ } else if (profile->alg == RTE_MTR_TRTCM_RFC2698) {
+ if (profile->trtcm_rfc2698.cir >= cir_min &&
+ profile->trtcm_rfc2698.cir < cir_max &&
+ profile->trtcm_rfc2698.cbs < cbs_max &&
+ profile->trtcm_rfc2698.cbs > 0 &&
+ profile->trtcm_rfc2698.pir >= pir_min &&
+ profile->trtcm_rfc2698.pir < pir_max &&
+ profile->trtcm_rfc2698.cir < profile->trtcm_rfc2698.pir &&
+ profile->trtcm_rfc2698.pbs > 0 &&
+ profile->trtcm_rfc2698.pbs < pbs_max)
+ goto check_exist;
+ else
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE,
+ NULL,
+ "Invalid metering parameters");
+ } else {
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE,
+ NULL,
+ "algorithm not supported");
+ }
+
+check_exist:
+ return 0;
+}
+
+static struct zxdh_meter_profile *
+zxdh_mtr_profile_find_by_id(struct zxdh_mtr_profile_list *mpl,
+ uint32_t meter_profile_id, uint16_t dpdk_portid)
+{
+ struct zxdh_meter_profile *mp = NULL;
+
+ TAILQ_FOREACH(mp, mpl, next) {
+ if (meter_profile_id == mp->meter_profile_id && mp->dpdk_port_id == dpdk_portid)
+ return mp;
+ }
+ return NULL;
+}
+
+static struct zxdh_meter_profile *
+zxdh_mtr_profile_res_alloc(struct rte_mempool *mtr_profile_mp)
+{
+ struct zxdh_meter_profile *meter_profile = NULL;
+
+ if (ZXDH_MP_ALLOC_OBJ_FUNC(mtr_profile_mp, meter_profile) != 0)
+ return NULL;
+
+ return meter_profile;
+}
+
+static struct zxdh_meter_policy *
+zxdh_mtr_policy_res_alloc(struct rte_mempool *mtr_policy_mp)
+{
+ struct zxdh_meter_policy *policy = NULL;
+
+ rte_mempool_get(mtr_policy_mp, (void **)&policy);
+ PMD_DRV_LOG(INFO, "policy %p", policy);
+ return policy;
+}
+
+static int
+zxdh_hw_profile_free_direct(struct rte_eth_dev *dev, ZXDH_PROFILE_TYPE car_type,
+ uint16_t hw_profile_id, struct rte_mtr_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint16_t vport = hw->vport.vport;
+ int ret = zxdh_np_car_profile_id_delete(vport, car_type,
+ (uint64_t)hw_profile_id);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "port %u free hw profile %u failed", vport, hw_profile_id);
+ return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+ "Meter free profile failed");
+ }
+
+ return 0;
+}
+
+int
+zxdh_hw_profile_alloc_direct(struct rte_eth_dev *dev, ZXDH_PROFILE_TYPE car_type,
+ uint64_t *hw_profile_id, struct rte_mtr_error *error)
+{
+ uint64_t profile_id = HW_PROFILE_MAX;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint16_t vport = hw->vport.vport;
+ int ret = zxdh_np_car_profile_id_add(vport, car_type, &profile_id);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "port %u alloc hw profile failed", vport);
+ return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+ "Meter offload alloc profile failed");
+ }
+ *hw_profile_id = profile_id;
+ if (*hw_profile_id == ZXDH_HW_PROFILE_MAX) {
+ return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+ "Meter offload alloc profile id invalid");
+ }
+
+ return 0;
+}
+
+static uint16_t
+zxdh_hw_profile_free(struct rte_eth_dev *dev, uint8_t car_type,
+ uint16_t hw_profile_id, struct rte_mtr_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ int ret = 0;
+
+ if (hw->is_pf) {
+ ret = zxdh_hw_profile_free_direct(dev, car_type, (uint64_t)hw_profile_id, error);
+ } else {
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_msg_reply_info reply_info = {0};
+ struct zxdh_plcr_profile_free *zxdh_plcr_profile_free =
+ &msg_info.data.zxdh_plcr_profile_free;
+
+ zxdh_plcr_profile_free->profile_id = hw_profile_id;
+ zxdh_plcr_profile_free->car_type = car_type;
+ zxdh_msg_head_build(hw, ZXDH_PLCR_CAR_PROFILE_ID_DELETE, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+ ZXDH_MSG_HEAD_LEN + sizeof(struct zxdh_plcr_profile_free),
+ &reply_info, sizeof(struct zxdh_msg_reply_info));
+
+ if (ret)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+ "Meter free profile failed ");
+ }
+
+ return ret;
+}
+
+static int
+zxdh_hw_profile_alloc(struct rte_eth_dev *dev, uint64_t *hw_profile_id,
+ struct rte_mtr_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ int ret = 0;
+
+ if (hw->is_pf) {
+ ret = zxdh_hw_profile_alloc_direct(dev, CAR_A, hw_profile_id, error);
+ } else {
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_msg_reply_info reply_info = {0};
+ struct zxdh_plcr_profile_add *zxdh_plcr_profile_add =
+ &msg_info.data.zxdh_plcr_profile_add;
+
+ zxdh_plcr_profile_add->car_type = CAR_A;
+ zxdh_msg_head_build(hw, ZXDH_PLCR_CAR_PROFILE_ID_ADD, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+ ZXDH_MSG_HEAD_LEN + sizeof(struct zxdh_plcr_profile_add),
+ &reply_info, sizeof(struct zxdh_msg_reply_info));
+
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to send msg: port 0x%x msg type ZXDH_PLCR_CAR_PROFILE_ID_ADD ",
+ hw->vport.vport);
+
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+ "Meter offload alloc profile id msg failed ");
+ }
+ *hw_profile_id = reply_info.reply_body.mtr_profile_info.profile_id;
+ if (*hw_profile_id == ZXDH_HW_PROFILE_MAX) {
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+ "Meter offload alloc profile id invalid ");
+ }
+ }
+
+ return ret;
+}
+
+int
+zxdh_hw_profile_unref(struct rte_eth_dev *dev,
+ uint8_t car_type,
+ uint16_t hw_profile_id,
+ struct rte_mtr_error *error)
+{
+ if (hw_profile_id >= ZXDH_HW_PROFILE_MAX)
+ return -1;
+
+ rte_spinlock_lock(&g_mtr_res.hw_plcr_res_lock);
+ if (g_mtr_res.hw_profile_refcnt[hw_profile_id] == 0) {
+ PMD_DRV_LOG(ERR, "del hw profile id %d but ref 0", hw_profile_id);
+ rte_spinlock_unlock(&g_mtr_res.hw_plcr_res_lock);
+ return -1;
+ }
+ if (--g_mtr_res.hw_profile_refcnt[hw_profile_id] == 0) {
+ PMD_DRV_LOG(INFO, "del hw profile id %d ", hw_profile_id);
+ zxdh_hw_profile_free(dev, car_type, hw_profile_id, error);
+ }
+ rte_spinlock_unlock(&g_mtr_res.hw_plcr_res_lock);
+ return 0;
+}
+
+static int
+zxdh_mtr_hw_counter_query(struct rte_eth_dev *dev,
+ bool clear,
+ bool dir,
+ struct zxdh_mtr_stats *mtr_stats,
+ struct rte_mtr_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ int ret = 0;
+
+ if (hw->is_pf) {
+ ret = zxdh_mtr_stats_get(dev, dir, mtr_stats);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "ZXDH_PORT_METER_STAT_GET port %u dir %d failed",
+ hw->vport.vport,
+ dir);
+
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_STATS, NULL,
+ "Failed to read meter stats.");
+ }
+ } else { /* send msg to pf */
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_msg_reply_info reply_info = {0};
+ struct zxdh_mtr_stats_query *zxdh_mtr_stats_query =
+ &msg_info.data.zxdh_mtr_stats_query;
+
+ zxdh_mtr_stats_query->direction = dir;
+ zxdh_mtr_stats_query->is_clr = !!clear;
+ zxdh_msg_head_build(hw, ZXDH_PORT_METER_STAT_GET, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev,
+ &msg_info,
+ sizeof(msg_info),
+ &reply_info,
+ sizeof(struct zxdh_msg_reply_info));
+
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to send msg: port 0x%x msg type ZXDH_PORT_METER_STAT_GET",
+ hw->vport.vport);
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_STATS, NULL,
+ "Failed to read meter stats.");
+ }
+ struct zxdh_mtr_stats *hw_mtr_stats = &reply_info.reply_body.hw_mtr_stats;
+
+ mtr_stats->n_bytes_dropped = hw_mtr_stats->n_bytes_dropped;
+ mtr_stats->n_pkts_dropped = hw_mtr_stats->n_pkts_dropped;
+ }
+
+ return ret;
+}
+
+static void
+zxdh_mtr_profile_res_free(struct rte_eth_dev *dev,
+ struct rte_mempool *mtr_profile_mp,
+ struct zxdh_meter_profile *meter_profile,
+ struct rte_mtr_error *error)
+{
+ if (meter_profile->ref_cnt == 0) {
+ ZXDH_MP_FREE_OBJ_FUNC(mtr_profile_mp, meter_profile);
+ return;
+ }
+ if (meter_profile->ref_cnt == 1) {
+ meter_profile->ref_cnt--;
+ zxdh_hw_profile_unref(dev, CAR_A, meter_profile->hw_profile_id, error);
+
+ TAILQ_REMOVE(&zxdh_shared_data->meter_profile_list, meter_profile, next);
+ ZXDH_MP_FREE_OBJ_FUNC(mtr_profile_mp, meter_profile);
+ } else {
+ PMD_DRV_LOG(INFO,
+ "profile %d ref %d is busy",
+ meter_profile->meter_profile_id,
+ meter_profile->ref_cnt);
+ }
+}
+
+static uint16_t
+zxdh_check_hw_profile_exist(struct zxdh_mtr_profile_list *mpl,
+ struct rte_mtr_meter_profile *profile,
+ uint16_t hw_profile_owner_vport)
+{
+ struct zxdh_meter_profile *mp;
+
+ TAILQ_FOREACH(mp, mpl, next) {
+ if ((memcmp(profile, &mp->profile, sizeof(struct rte_mtr_meter_profile)) == 0) &&
+ hw_profile_owner_vport == mp->hw_profile_owner_vport) {
+ return mp->hw_profile_id;
+ }
+ }
+ return ZXDH_HW_PROFILE_MAX;
+}
+
+static void
+zxdh_plcr_param_build(struct rte_mtr_meter_profile *profile,
+ void *plcr_param, uint16_t profile_id)
+{
+ if (profile->packet_mode == 0) {
+ ZXDH_STAT_CAR_PROFILE_CFG_T *p_car_byte_profile_cfg =
+ (ZXDH_STAT_CAR_PROFILE_CFG_T *)plcr_param;
+
+ p_car_byte_profile_cfg->profile_id = profile_id;
+ p_car_byte_profile_cfg->pkt_sign = profile->packet_mode;
+ p_car_byte_profile_cfg->cf = ZXDH_PLCR_CF_UNOVERFLOW;
+ p_car_byte_profile_cfg->cm = ZXDH_PLCR_CM_BLIND;
+ if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+ p_car_byte_profile_cfg->cd = ZXDH_PLCR_CD_SRTCM;
+ p_car_byte_profile_cfg->cir = profile->srtcm_rfc2697.cir * 8 / 1000;
+ p_car_byte_profile_cfg->cbs = profile->srtcm_rfc2697.cbs;
+ p_car_byte_profile_cfg->ebs = profile->srtcm_rfc2697.ebs;
+ } else {
+ p_car_byte_profile_cfg->cd = ZXDH_PLCR_CD_TRTCM;
+ p_car_byte_profile_cfg->cir = profile->trtcm_rfc2698.cir * 8 / 1000;
+ p_car_byte_profile_cfg->cbs = profile->trtcm_rfc2698.cbs;
+ p_car_byte_profile_cfg->eir = (profile->trtcm_rfc2698.pir -
+ profile->trtcm_rfc2698.cir) * 8 / 1000;
+ p_car_byte_profile_cfg->ebs =
+ profile->trtcm_rfc2698.pbs - profile->trtcm_rfc2698.cbs;
+ }
+ } else {
+ ZXDH_STAT_CAR_PKT_PROFILE_CFG_T *p_car_pkt_profile_cfg =
+ (ZXDH_STAT_CAR_PKT_PROFILE_CFG_T *)plcr_param;
+
+ p_car_pkt_profile_cfg->profile_id = profile_id;
+ p_car_pkt_profile_cfg->pkt_sign = profile->packet_mode;
+
+ if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+ p_car_pkt_profile_cfg->cir = profile->srtcm_rfc2697.cir;
+ p_car_pkt_profile_cfg->cbs = profile->srtcm_rfc2697.cbs;
+ } else {
+ p_car_pkt_profile_cfg->cir = profile->trtcm_rfc2698.cir;
+ p_car_pkt_profile_cfg->cbs = profile->trtcm_rfc2698.cbs;
+ }
+ }
+}
+
+static int
+zxdh_hw_profile_config_direct(struct rte_eth_dev *dev __rte_unused,
+ ZXDH_PROFILE_TYPE car_type,
+ uint16_t hw_profile_id,
+ struct zxdh_meter_profile *mp,
+ struct rte_mtr_error *error)
+{
+ int ret = zxdh_np_car_profile_cfg_set(mp->hw_profile_owner_vport,
+ car_type, mp->profile.packet_mode,
+ (uint32_t)hw_profile_id, &mp->plcr_param);
+ if (ret) {
+ PMD_DRV_LOG(ERR, " config hw profile %u failed", hw_profile_id);
+ return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+ "Meter offload cfg profile failed");
+ }
+
+ return 0;
+}
+
+static int
+zxdh_hw_profile_config(struct rte_eth_dev *dev, uint16_t hw_profile_id,
+ struct zxdh_meter_profile *mp, struct rte_mtr_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ int ret = 0;
+
+ if (hw->is_pf) {
+ ret = zxdh_hw_profile_config_direct(dev, CAR_A, hw_profile_id, mp, error);
+ } else {
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_msg_reply_info reply_info = {0};
+ struct zxdh_plcr_profile_cfg *zxdh_plcr_profile_cfg =
+ &msg_info.data.zxdh_plcr_profile_cfg;
+
+ zxdh_plcr_profile_cfg->car_type = CAR_A;
+ zxdh_plcr_profile_cfg->packet_mode = mp->profile.packet_mode;
+ zxdh_plcr_profile_cfg->hw_profile_id = hw_profile_id;
+ rte_memcpy(&zxdh_plcr_profile_cfg->plcr_param,
+ &mp->plcr_param,
+ sizeof(zxdh_plcr_profile_cfg->plcr_param));
+
+ zxdh_msg_head_build(hw, ZXDH_PLCR_CAR_PROFILE_CFG_SET, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev,
+ &msg_info,
+ ZXDH_MSG_HEAD_LEN + sizeof(struct zxdh_plcr_profile_cfg),
+ &reply_info,
+ sizeof(struct zxdh_msg_reply_info));
+
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed msg: port 0x%x msg type ZXDH_PLCR_CAR_PROFILE_CFG_SET ",
+ hw->vport.vport);
+
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+ "Meter offload cfg profile failed ");
+ }
+ }
+
+ return ret;
+}
+
+static int
+zxdh_mtr_profile_offload(struct rte_eth_dev *dev, struct zxdh_meter_profile *mp,
+ struct rte_mtr_meter_profile *profile, struct rte_mtr_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint16_t hw_profile_owner_vport = ZXDH_GET_OWNER_PF_VPORT(hw->vport.vport);
+
+ mp->hw_profile_owner_vport = hw_profile_owner_vport;
+ uint64_t hw_profile_id =
+ zxdh_check_hw_profile_exist(&zxdh_shared_data->meter_profile_list,
+ profile,
+ hw_profile_owner_vport);
+
+ if (hw_profile_id == ZXDH_HW_PROFILE_MAX) {
+ uint32_t ret = zxdh_hw_profile_alloc(dev, &hw_profile_id, error);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "hw_profile alloc fail");
+ return ret;
+ }
+
+ zxdh_plcr_param_build(profile, &mp->plcr_param, hw_profile_id);
+ ret = zxdh_hw_profile_config(dev, hw_profile_id, mp, error);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "zxdh_hw_profile_config fail");
+ hw_profile_id = ZXDH_HW_PROFILE_MAX;
+ return ret;
+ }
+ }
+ zxdh_hw_profile_ref(hw_profile_id);
+ mp->hw_profile_id = hw_profile_id;
+
+ return 0;
+}
+
+static int
+zxdh_meter_profile_add(struct rte_eth_dev *dev,
+ uint32_t meter_profile_id,
+ struct rte_mtr_meter_profile *profile,
+ struct rte_mtr_error *error)
+{
+ struct zxdh_meter_profile *mp;
+ int ret;
+
+ ret = zxdh_mtr_profile_validate(meter_profile_id, profile, error);
+ if (ret)
+ return ret;
+ mp = zxdh_mtr_profile_find_by_id(&zxdh_shared_data->meter_profile_list,
+ meter_profile_id,
+ dev->data->port_id);
+
+ if (mp)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE,
+ NULL,
+ "meter profile already exists");
+
+ mp = zxdh_mtr_profile_res_alloc(zxdh_shared_data->mtr_profile_mp);
+ if (mp == NULL)
+ return -rte_mtr_error_set(error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE,
+ NULL, "Meter profile res memory alloc failed.");
+
+ memset(mp, 0, sizeof(struct zxdh_meter_profile));
+
+ mp->meter_profile_id = meter_profile_id;
+ mp->dpdk_port_id = dev->data->port_id;
+ mp->hw_profile_id = UINT16_MAX;
+ rte_memcpy(&mp->profile, profile, sizeof(struct rte_mtr_meter_profile));
+
+ ret = zxdh_mtr_profile_offload(dev, mp, profile, error);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ " port %d profile id %d offload failed ",
+ dev->data->port_id,
+ meter_profile_id);
+ goto error;
+ }
+
+ TAILQ_INSERT_TAIL(&zxdh_shared_data->meter_profile_list, mp, next);
+ PMD_DRV_LOG(DEBUG,
+ "add profile id %d mp %p mp->ref_cnt %d",
+ meter_profile_id,
+ mp,
+ mp->ref_cnt);
+
+ mp->ref_cnt++;
+
+ return 0;
+error:
+ zxdh_mtr_profile_res_free(dev, zxdh_shared_data->mtr_profile_mp, mp, error);
+ return ret;
+}
+
+static int
+zxdh_meter_profile_delete(struct rte_eth_dev *dev,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error)
+{
+ struct zxdh_meter_profile *mp;
+
+ mp = zxdh_mtr_profile_find_by_id(&zxdh_shared_data->meter_profile_list,
+ meter_profile_id,
+ dev->data->port_id);
+
+ if (mp == NULL) {
+ PMD_DRV_LOG(ERR, "del profile id %d not found", meter_profile_id);
+ return -rte_mtr_error_set(error, ENOENT,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE,
+ &meter_profile_id,
+ "Meter profile id does not exist.");
+ }
+ zxdh_mtr_profile_res_free(dev, zxdh_shared_data->mtr_profile_mp, mp, error);
+
+ return 0;
+}
+
+static int
+zxdh_meter_policy_add(struct rte_eth_dev *dev,
+ uint32_t policy_id,
+ struct rte_mtr_meter_policy_params *policy,
+ struct rte_mtr_error *error)
+{
+ int ret = 0;
+ struct zxdh_meter_policy *mtr_policy = NULL;
+
+ if (policy_id >= ZXDH_MAX_POLICY_NUM)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "policy ID is invalid. ");
+ mtr_policy = zxdh_mtr_policy_find_by_id(&zxdh_shared_data->mtr_policy_list,
+ policy_id,
+ dev->data->port_id);
+
+ if (mtr_policy)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "policy ID already exists.");
+ ret = zxdh_policy_validate_actions(policy->actions, error);
+ if (ret) {
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "only the drop action is supported for red.");
+ }
+
+ mtr_policy = zxdh_mtr_policy_res_alloc(zxdh_shared_data->mtr_policy_mp);
+ if (mtr_policy == NULL) {
+ return -rte_mtr_error_set(error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "Meter policy res memory alloc failed.");
+ }
+ /* Fill profile info. */
+ memset(mtr_policy, 0, sizeof(struct zxdh_meter_policy));
+ mtr_policy->policy_id = policy_id;
+ mtr_policy->dpdk_port_id = dev->data->port_id;
+ rte_memcpy(&mtr_policy->policy, policy, sizeof(struct rte_mtr_meter_policy_params));
+ /* Add to list. */
+ TAILQ_INSERT_TAIL(&zxdh_shared_data->mtr_policy_list, mtr_policy, next);
+ mtr_policy->ref_cnt++;
+ PMD_DRV_LOG(INFO, "alloc policy id %d ok %p", mtr_policy->policy_id, mtr_policy);
+ return 0;
+}
+
+static int
+zxdh_meter_policy_delete(struct rte_eth_dev *dev,
+ uint32_t policy_id,
+ struct rte_mtr_error *error)
+{
+ struct zxdh_meter_policy *mtr_policy = NULL;
+
+ if (policy_id >= ZXDH_MAX_POLICY_NUM)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "policy ID is invalid. ");
+ mtr_policy = zxdh_mtr_policy_find_by_id(&zxdh_shared_data->mtr_policy_list,
+ policy_id, dev->data->port_id);
+
+ if (mtr_policy && mtr_policy->ref_cnt == 1) {
+ TAILQ_REMOVE(&zxdh_shared_data->mtr_policy_list, mtr_policy, next);
+ MP_FREE_OBJ_FUNC(zxdh_shared_data->mtr_policy_mp, mtr_policy);
+ } else {
+ if (mtr_policy) {
+ PMD_DRV_LOG(INFO,
+ " policy id %d ref %d is busy ",
+ mtr_policy->policy_id,
+ mtr_policy->ref_cnt);
+ } else {
+ PMD_DRV_LOG(ERR, "policy id %d does not exist", policy_id);
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "policy ID does not exist.");
+ }
+ }
+ return 0;
+}
+
+static int
+zxdh_meter_validate(uint32_t meter_id,
+ struct rte_mtr_params *params,
+ struct rte_mtr_error *error)
+{
+ /* Meter params must not be NULL. */
+ if (params == NULL)
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+ NULL, "Meter object params null.");
+ /* Previous meter color is not supported. */
+ if (params->use_prev_mtr_color)
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+ NULL,
+ "Previous meter color not supported.");
+ if (meter_id > ZXDH_MAX_MTR_NUM / 2) {
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+ NULL,
+ "meter id exceeds 1024, not supported");
+ }
+ return 0;
+}
+
+static int
+zxdh_check_port_mtr_bind(struct rte_eth_dev *dev, uint32_t dir)
+{
+ struct zxdh_mtr_object *mtr_obj = NULL;
+
+ TAILQ_FOREACH(mtr_obj, &zxdh_shared_data->mtr_list, next) {
+ if (mtr_obj->direction != dir)
+ continue;
+ if (mtr_obj->port_id == dev->data->port_id) {
+ PMD_DRV_LOG(INFO,
+ "port %d dir %d already bind meter %d",
+ dev->data->port_id,
+ dir,
+ mtr_obj->meter_id);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static struct zxdh_mtr_object *
+zxdh_mtr_obj_alloc(struct rte_mempool *mtr_mp)
+{
+ struct zxdh_mtr_object *mtr_obj = NULL;
+
+ if (ZXDH_MP_ALLOC_OBJ_FUNC(mtr_mp, mtr_obj) != 0)
+ return NULL;
+
+ return mtr_obj;
+}
+
+static uint32_t dir_to_mtr_mode[] = {
+ ZXDH_PORT_EGRESS_METER_EN_OFF_FLAG,
+ ZXDH_PORT_INGRESS_METER_EN_OFF_FLAG
+};
+
+static int
+zxdh_set_mtr_enable(struct rte_eth_dev *dev, uint8_t dir, bool enable, struct rte_mtr_error *error)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ struct zxdh_port_attr_table port_attr = {0};
+ int ret = 0;
+
+ if (priv->is_pf) {
+ ret = zxdh_get_port_attr(priv, priv->vport.vport, &port_attr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "%s get port attr failed", __func__);
+ return -ret;
+ }
+ port_attr.ingress_meter_enable = enable;
+ ret = zxdh_set_port_attr(priv, priv->vport.vport, &port_attr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "%s set port attr failed", __func__);
+ return -ret;
+ }
+ } else {
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_msg;
+
+ attr_msg->mode = dir_to_mtr_mode[dir];
+ attr_msg->value = enable;
+ zxdh_msg_head_build(priv, ZXDH_PORT_ATTRS_SET, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+ sizeof(struct zxdh_msg_head) + sizeof(struct zxdh_port_attr_set_msg),
+ NULL, 0);
+ }
+ if (ret) {
+ PMD_DRV_LOG(ERR, " port %d mtr enable failed", priv->port_id);
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+ "Meter enable failed.");
+ }
+ if (dir == ZXDH_INGRESS)
+ priv->i_mtr_en = !!enable;
+ else
+ priv->e_mtr_en = !!enable;
+
+ return ret;
+}
+
+static void
+zxdh_meter_build_actions(struct zxdh_meter_action *mtr_action,
+ struct rte_mtr_params *params)
+{
+ mtr_action->stats_mask = params->stats_mask;
+ mtr_action->action[RTE_COLOR_RED] = ZXDH_MTR_POLICER_ACTION_DROP;
+}
+
+static int
+zxdh_hw_plcrflow_config(struct rte_eth_dev *dev, uint16_t hw_flow_id,
+ struct zxdh_mtr_object *mtr, struct rte_mtr_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ int ret = 0;
+
+ if (hw->is_pf) {
+ uint64_t hw_profile_id = (uint64_t)mtr->profile->hw_profile_id;
+
+ ret = zxdh_np_stat_car_queue_cfg_set(hw->dev_id, CAR_A,
+ hw_flow_id, 1, mtr->enable, hw_profile_id);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "zxdh_np_stat_car_queue_cfg_set failed, flow id %d profile id %d",
+ hw_flow_id, mtr->profile->hw_profile_id);
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+ NULL, "Failed to bind plcr flow.");
+ }
+ } else {
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_msg_reply_info reply_info = {0};
+ struct zxdh_plcr_flow_cfg *zxdh_plcr_flow_cfg = &msg_info.data.zxdh_plcr_flow_cfg;
+
+ zxdh_plcr_flow_cfg->car_type = CAR_A;
+ zxdh_plcr_flow_cfg->flow_id = hw_flow_id;
+ zxdh_plcr_flow_cfg->drop_flag = 1;
+ zxdh_plcr_flow_cfg->plcr_en = mtr->enable;
+ zxdh_plcr_flow_cfg->profile_id = mtr->profile->hw_profile_id;
+ zxdh_msg_head_build(hw, ZXDH_PLCR_CAR_QUEUE_CFG_SET, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+ ZXDH_MSG_HEAD_LEN + sizeof(struct zxdh_plcr_flow_cfg),
+ &reply_info,
+ sizeof(struct zxdh_msg_reply_info));
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed msg: port 0x%x msg type ZXDH_PLCR_CAR_QUEUE_CFG_SET ",
+ hw->vport.vport);
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+ NULL, "Failed to bind plcr flow.");
+ }
+ }
+
+ return ret;
+}
+
+static void
+zxdh_mtr_obj_free(struct rte_eth_dev *dev, struct zxdh_mtr_object *mtr_obj)
+{
+ struct zxdh_mtr_list *mtr_list = &zxdh_shared_data->mtr_list;
+ struct rte_mempool *mtr_mp = zxdh_shared_data->mtr_mp;
+
+ PMD_DRV_LOG(INFO, "free port %d dir %d meter %d mtr refcnt:%d ....",
+ dev->data->port_id, mtr_obj->direction, mtr_obj->meter_id, mtr_obj->mtr_ref_cnt);
+
+ if (mtr_obj->policy)
+ mtr_obj->policy->ref_cnt--;
+
+ if (mtr_obj->profile)
+ mtr_obj->profile->ref_cnt--;
+
+ PMD_DRV_LOG(INFO,
+ "free port %d dir %d meter %d profile refcnt:%d ",
+ dev->data->port_id,
+ mtr_obj->direction,
+ mtr_obj->meter_id,
+ mtr_obj->profile ? mtr_obj->profile->ref_cnt : 0);
+
+ if (--mtr_obj->mtr_ref_cnt == 0) {
+ PMD_DRV_LOG(INFO, "rm mtr %p refcnt:%d ....", mtr_obj, mtr_obj->mtr_ref_cnt);
+ TAILQ_REMOVE(mtr_list, mtr_obj, next);
+ MP_FREE_OBJ_FUNC(mtr_mp, mtr_obj);
+ }
+}
+
+static int
+zxdh_mtr_flow_offload(struct rte_eth_dev *dev,
+ struct zxdh_mtr_object *mtr,
+ struct rte_mtr_error *error)
+{
+ uint16_t hw_flow_id;
+
+ hw_flow_id = mtr->vfid * 2 + ZXDH_PORT_MTR_FID_BASE + mtr->direction;
+ return zxdh_hw_plcrflow_config(dev, hw_flow_id, mtr, error);
+}
+
+static struct zxdh_mtr_object *
+zxdh_mtr_find(uint32_t meter_id, uint16_t dpdk_portid)
+{
+ struct zxdh_mtr_list *mtr_list = &zxdh_shared_data->mtr_list;
+ struct zxdh_mtr_object *mtr = NULL;
+
+ TAILQ_FOREACH(mtr, mtr_list, next) {
+ PMD_DRV_LOG(INFO,
+ "mtrlist head %p mtr %p mtr->meterid %d to find mtrid %d",
+ TAILQ_FIRST(mtr_list),
+ mtr,
+ mtr->meter_id,
+ meter_id
+ );
+
+ if (meter_id == mtr->meter_id && dpdk_portid == mtr->port_id)
+ return mtr;
+ }
+ return NULL;
+}
+
+static int
+zxdh_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
+ struct rte_mtr_params *params, int shared,
+ struct rte_mtr_error *error)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ struct zxdh_mtr_list *mtr_list = &zxdh_shared_data->mtr_list;
+ struct zxdh_mtr_object *mtr;
+ struct zxdh_meter_profile *mtr_profile;
+ struct zxdh_meter_policy *mtr_policy;
+ uint8_t dir = 0;
+ int ret;
+
+ if (shared)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+ "Meter share is not supported");
+
+ ret = zxdh_meter_validate(meter_id, params, error);
+ if (ret)
+ return ret;
+
+ if (zxdh_check_port_mtr_bind(dev, dir))
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+ "Meter object already bind to dev.");
+
+ mtr_profile = zxdh_mtr_profile_find_by_id(&zxdh_shared_data->meter_profile_list,
+ params->meter_profile_id,
+ dev->data->port_id
+ );
+
+ if (mtr_profile == NULL)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE, &params->meter_profile_id,
+ "Meter profile object does not exist.");
+ mtr_profile->ref_cnt++;
+ mtr_policy = zxdh_mtr_policy_find_by_id(&zxdh_shared_data->mtr_policy_list,
+ params->meter_policy_id,
+ dev->data->port_id);
+
+ if (mtr_policy == NULL) {
+ ret = -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID, &params->meter_policy_id,
+ "Meter policy object does not exist.");
+ mtr_profile->ref_cnt--;
+ return ret;
+ }
+ mtr_policy->ref_cnt++;
+
+ mtr = zxdh_mtr_obj_alloc(zxdh_shared_data->mtr_mp);
+ if (mtr == NULL) {
+ ret = -rte_mtr_error_set(error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+ "Memory alloc failed for meter.");
+ mtr_policy->ref_cnt--;
+ mtr_profile->ref_cnt--;
+ return ret;
+ }
+ memset(mtr, 0, sizeof(struct zxdh_mtr_object));
+
+ mtr->meter_id = meter_id;
+ mtr->profile = mtr_profile;
+
+ zxdh_meter_build_actions(&mtr->mtr_action, params);
+ TAILQ_INSERT_TAIL(mtr_list, mtr, next);
+ mtr->enable = !!params->meter_enable;
+ mtr->shared = !!shared;
+ mtr->mtr_ref_cnt++;
+ mtr->vfid = priv->vfid;
+ mtr->port_id = dev->data->port_id;
+ mtr->policy = mtr_policy;
+ mtr->direction = !!dir;
+ if (params->meter_enable) {
+ ret = zxdh_mtr_flow_offload(dev, mtr, error);
+ if (ret)
+ goto error;
+ }
+ ret = zxdh_set_mtr_enable(dev, mtr->direction, 1, error);
+ if (ret)
+ goto error;
+ return ret;
+error:
+ zxdh_mtr_obj_free(dev, mtr);
+ return ret;
+}
+
+static int
+zxdh_meter_destroy(struct rte_eth_dev *dev, uint32_t meter_id,
+ struct rte_mtr_error *error)
+{
+ struct zxdh_mtr_object *mtr;
+
+ mtr = zxdh_mtr_find(meter_id, dev->data->port_id);
+ if (mtr == NULL)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ NULL, "Meter object id not valid.");
+ mtr->enable = 0;
+ zxdh_set_mtr_enable(dev, mtr->direction, 0, error);
+
+ if (zxdh_mtr_flow_offload(dev, mtr, error))
+ return -1;
+
+ zxdh_mtr_obj_free(dev, mtr);
+ return 0;
+}
+
+void
+zxdh_mtr_policy_res_free(struct rte_mempool *mtr_policy_mp, struct zxdh_meter_policy *policy)
+{
+ PMD_DRV_LOG(INFO, "to free policy %d ref %d ", policy->policy_id, policy->ref_cnt);
+
+ if (--policy->ref_cnt == 0) {
+ TAILQ_REMOVE(&zxdh_shared_data->mtr_policy_list, policy, next);
+ MP_FREE_OBJ_FUNC(mtr_policy_mp, policy);
+ }
+}
+
+static int
+zxdh_mtr_stats_read(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ struct rte_mtr_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_mtr_error *error)
+{
+ struct zxdh_mtr_stats mtr_stat = {0};
+ struct zxdh_mtr_object *mtr = NULL;
+ int ret = 0;
+ /* Meter object must exist. */
+ mtr = zxdh_mtr_find(mtr_id, dev->data->port_id);
+ if (mtr == NULL)
+ return -rte_mtr_error_set(error, ENOENT,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ NULL, "Meter object id not valid.");
+ *stats_mask = RTE_MTR_STATS_N_BYTES_DROPPED | RTE_MTR_STATS_N_PKTS_DROPPED;
+ memset(&mtr_stat, 0, sizeof(mtr_stat));
+ ret = zxdh_mtr_hw_counter_query(dev, clear, mtr->direction, &mtr_stat, error);
+ if (ret)
+ goto error;
+ stats->n_bytes_dropped = mtr_stat.n_bytes_dropped;
+ stats->n_pkts_dropped = mtr_stat.n_pkts_dropped;
+
+ return 0;
+error:
+ return -rte_mtr_error_set(error, ret, RTE_MTR_ERROR_TYPE_STATS, NULL,
+ "Failed to read meter drop counters.");
+}
+
+static const struct rte_mtr_ops zxdh_mtr_ops = {
+ .capabilities_get = zxdh_meter_cap_get,
+ .meter_profile_add = zxdh_meter_profile_add,
+ .meter_profile_delete = zxdh_meter_profile_delete,
+ .create = zxdh_meter_create,
+ .destroy = zxdh_meter_destroy,
+ .stats_read = zxdh_mtr_stats_read,
+ .meter_policy_add = zxdh_meter_policy_add,
+ .meter_policy_delete = zxdh_meter_policy_delete,
+};
+
+int
+zxdh_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
+{
+ *(const struct rte_mtr_ops **)arg = &zxdh_mtr_ops;
+ return 0;
+}
+
+void
+zxdh_mtr_release(struct rte_eth_dev *dev)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ struct zxdh_meter_profile *profile;
+ struct rte_mtr_error error = {0};
+ struct zxdh_mtr_object *mtr_obj;
+
+ RTE_TAILQ_FOREACH(mtr_obj, &zxdh_shared_data->mtr_list, next) {
+ if (mtr_obj->port_id == priv->port_id)
+ zxdh_mtr_obj_free(dev, mtr_obj);
+ }
+
+ RTE_TAILQ_FOREACH(profile, &zxdh_shared_data->meter_profile_list, next) {
+ if (profile->dpdk_port_id == priv->port_id)
+ zxdh_mtr_profile_res_free(dev,
+ zxdh_shared_data->mtr_profile_mp,
+ profile,
+ &error
+ );
+ }
+
+ struct zxdh_meter_policy *policy;
+
+ RTE_TAILQ_FOREACH(policy, &zxdh_shared_data->mtr_policy_list, next) {
+ if (policy->dpdk_port_id == priv->port_id)
+ zxdh_mtr_policy_res_free(zxdh_shared_data->mtr_policy_mp, policy);
+ }
+}
diff --git a/drivers/net/zxdh/zxdh_mtr.h b/drivers/net/zxdh/zxdh_mtr.h
new file mode 100644
index 0000000000..51ddc0840b
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_mtr.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef ZXDH_MTR_H
+#define ZXDH_MTR_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+#include <rte_mtr.h>
+
+#include "zxdh_np.h"
+
+#define HW_PROFILE_MAX 512
+#define ZXDH_MAX_MTR_NUM 2048
+#define ZXDH_MAX_POLICY_NUM ZXDH_MAX_MTR_NUM
+#define MAX_MTR_PROFILE_NUM HW_PROFILE_MAX
+#define ZXDH_INGRESS 1
+#define ZXDH_EGRESS 2
+
+#define MP_FREE_OBJ_FUNC(mp, obj) rte_mempool_put(mp, obj)
+
+struct zxdh_mtr_res {
+ rte_spinlock_t hw_plcr_res_lock;
+ uint32_t hw_profile_refcnt[HW_PROFILE_MAX];
+ struct rte_mtr_meter_profile profile[HW_PROFILE_MAX];
+};
+
+extern struct zxdh_mtr_res g_mtr_res;
+extern struct zxdh_shared_data *zxdh_shared_data;
+
+enum zxdh_mtr_policer_action {
+ ZXDH_MTR_POLICER_ACTION_COLOR_GREEN = 0,
+ ZXDH_MTR_POLICER_ACTION_COLOR_YELLOW,
+ ZXDH_MTR_POLICER_ACTION_COLOR_RED,
+ ZXDH_MTR_POLICER_ACTION_DROP,
+};
+
+union zxdh_offload_profile_cfg {
+ ZXDH_STAT_CAR_PKT_PROFILE_CFG_T p_car_pkt_profile_cfg;
+ ZXDH_STAT_CAR_PROFILE_CFG_T p_car_byte_profile_cfg;
+};
+
+/* meter profile structure. */
+struct zxdh_meter_profile {
+ TAILQ_ENTRY(zxdh_meter_profile) next; /* Pointer to the next meter profile. */
+ uint16_t dpdk_port_id;
+ uint16_t hw_profile_owner_vport;
+ uint16_t meter_profile_id; /* software Profile id. */
+ uint16_t hw_profile_id; /* hardware Profile id. */
+ struct rte_mtr_meter_profile profile; /* Profile detail. */
+ union zxdh_offload_profile_cfg plcr_param;
+ uint32_t ref_cnt; /* used count. */
+};
+TAILQ_HEAD(zxdh_mtr_profile_list, zxdh_meter_profile);
+
+struct zxdh_meter_policy {
+ TAILQ_ENTRY(zxdh_meter_policy) next;
+ uint16_t policy_id;
+ uint16_t ref_cnt;
+ uint16_t dpdk_port_id;
+ uint16_t rsv;
+ struct rte_mtr_meter_policy_params policy;
+};
+TAILQ_HEAD(zxdh_mtr_policy_list, zxdh_meter_policy);
+
+struct zxdh_meter_action {
+ enum zxdh_mtr_policer_action action[RTE_COLORS];
+ uint64_t stats_mask;
+};
+
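+/* Meter instance: binds a profile and a policy to a port/vfid and records
+ * the per-color policer actions.
+ */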
+struct zxdh_mtr_object {
+ TAILQ_ENTRY(zxdh_mtr_object) next;
+ uint8_t direction:1, /* 0:ingress, 1:egress */
+ shared:1,
+ enable:1,
+ rsv:5;
+ uint8_t rsv8;
+ uint16_t port_id;
+ uint16_t vfid;
+ uint16_t meter_id;
+ uint16_t mtr_ref_cnt;
+ uint16_t rsv16;
+ struct zxdh_meter_profile *profile;
+ struct zxdh_meter_policy *policy;
+ struct zxdh_meter_action mtr_action;
+};
+TAILQ_HEAD(zxdh_mtr_list, zxdh_mtr_object);
+
+struct zxdh_mtr_stats {
+ uint64_t n_pkts_dropped;
+ uint64_t n_bytes_dropped;
+};
+
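+/* Drop counters as reported by hardware, split into 32-bit high/low halves. */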
+struct zxdh_hw_mtr_stats {
+ uint32_t n_pkts_dropped_hi;
+ uint32_t n_pkts_dropped_lo;
+ uint32_t n_bytes_dropped_hi;
+ uint32_t n_bytes_dropped_lo;
+};
+
+int zxdh_meter_ops_get(struct rte_eth_dev *dev, void *arg);
+void zxdh_mtr_release(struct rte_eth_dev *dev);
+void zxdh_mtr_policy_res_free(struct rte_mempool *mtr_policy_mp, struct zxdh_meter_policy *policy);
+int zxdh_hw_profile_unref(struct rte_eth_dev *dev,
+ uint8_t car_type,
+ uint16_t hw_profile_id,
+ struct rte_mtr_error *error);
+int zxdh_hw_profile_alloc_direct(struct rte_eth_dev *dev, ZXDH_PROFILE_TYPE car_type,
+ uint64_t *hw_profile_id, struct rte_mtr_error *error);
+int zxdh_hw_profile_ref(uint16_t hw_profile_id);
+
+#endif /* ZXDH_MTR_H */
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
index 015372bedc..926880fd4e 100644
--- a/drivers/net/zxdh/zxdh_np.c
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -11,6 +11,7 @@
#include <rte_malloc.h>
#include <rte_memcpy.h>
+#include "zxdh_msg.h"
#include "zxdh_np.h"
#include "zxdh_logs.h"
@@ -142,6 +143,46 @@ do {\
#define ZXDH_DTB_QUEUE_INIT_FLAG_GET(DEV_ID, QUEUE_ID) \
(p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].init_flag)
+ZXDH_FIELD_T g_stat_car0_cara_queue_ram0_159_0_reg[] = {
+ {"cara_drop", ZXDH_FIELD_FLAG_RW, 147, 1, 0x0, 0x0},
+ {"cara_plcr_en", ZXDH_FIELD_FLAG_RW, 146, 1, 0x0, 0x0},
+ {"cara_profile_id", ZXDH_FIELD_FLAG_RW, 145, 9, 0x0, 0x0},
+ {"cara_tq_h", ZXDH_FIELD_FLAG_RO, 136, 13, 0x0, 0x0},
+ {"cara_tq_l", ZXDH_FIELD_FLAG_RO, 123, 32, 0x0, 0x0},
+ {"cara_ted", ZXDH_FIELD_FLAG_RO, 91, 19, 0x0, 0x0},
+ {"cara_tcd", ZXDH_FIELD_FLAG_RO, 72, 19, 0x0, 0x0},
+ {"cara_tei", ZXDH_FIELD_FLAG_RO, 53, 27, 0x0, 0x0},
+ {"cara_tci", ZXDH_FIELD_FLAG_RO, 26, 27, 0x0, 0x0},
+};
+
+ZXDH_FIELD_T g_stat_car0_carb_queue_ram0_159_0_reg[] = {
+ {"carb_drop", ZXDH_FIELD_FLAG_RW, 147, 1, 0x0, 0x0},
+ {"carb_plcr_en", ZXDH_FIELD_FLAG_RW, 146, 1, 0x0, 0x0},
+ {"carb_profile_id", ZXDH_FIELD_FLAG_RW, 145, 9, 0x0, 0x0},
+ {"carb_tq_h", ZXDH_FIELD_FLAG_RO, 136, 13, 0x0, 0x0},
+ {"carb_tq_l", ZXDH_FIELD_FLAG_RO, 123, 32, 0x0, 0x0},
+ {"carb_ted", ZXDH_FIELD_FLAG_RO, 91, 19, 0x0, 0x0},
+ {"carb_tcd", ZXDH_FIELD_FLAG_RO, 72, 19, 0x0, 0x0},
+ {"carb_tei", ZXDH_FIELD_FLAG_RO, 53, 27, 0x0, 0x0},
+ {"carb_tci", ZXDH_FIELD_FLAG_RO, 26, 27, 0x0, 0x0},
+};
+
+ZXDH_FIELD_T g_stat_car0_carc_queue_ram0_159_0_reg[] = {
+ {"carc_drop", ZXDH_FIELD_FLAG_RW, 147, 1, 0x0, 0x0},
+ {"carc_plcr_en", ZXDH_FIELD_FLAG_RW, 146, 1, 0x0, 0x0},
+ {"carc_profile_id", ZXDH_FIELD_FLAG_RW, 145, 9, 0x0, 0x0},
+ {"carc_tq_h", ZXDH_FIELD_FLAG_RO, 136, 13, 0x0, 0x0},
+ {"carc_tq_l", ZXDH_FIELD_FLAG_RO, 123, 32, 0x0, 0x0},
+ {"carc_ted", ZXDH_FIELD_FLAG_RO, 91, 19, 0x0, 0x0},
+ {"carc_tcd", ZXDH_FIELD_FLAG_RO, 72, 19, 0x0, 0x0},
+ {"carc_tei", ZXDH_FIELD_FLAG_RO, 53, 27, 0x0, 0x0},
+ {"carc_tci", ZXDH_FIELD_FLAG_RO, 26, 27, 0x0, 0x0},
+};
+
+ZXDH_FIELD_T g_nppu_pktrx_cfg_pktrx_glbal_cfg_0_reg[] = {
+ {"pktrx_glbal_cfg_0", ZXDH_FIELD_FLAG_RW, 31, 32, 0x0, 0x0},
+};
+
static uint32_t
zxdh_np_comm_is_big_endian(void)
{
@@ -2321,3 +2362,427 @@ zxdh_np_stat_ppu_cnt_get_ex(uint32_t dev_id,
return rc;
}
+
+static uint32_t
+zxdh_np_agent_channel_sync_send(ZXDH_AGENT_CHANNEL_MSG_T *p_msg,
+ uint32_t *p_data,
+ uint32_t rep_len)
+{
+ uint32_t ret = 0;
+ uint32_t vport = 0;
+ struct zxdh_pci_bar_msg in = {0};
+ struct zxdh_msg_recviver_mem result = {0};
+ uint32_t *recv_buffer = NULL;
+ uint8_t *reply_ptr = NULL;
+ uint16_t reply_msg_len = 0;
+ uint64_t agent_addr = 0;
+
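+ /* Select the BAR channel source end according to the PF/VF role of the vport. */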
+ if (ZXDH_IS_PF(vport))
+ in.src = ZXDH_MSG_CHAN_END_PF;
+ else
+ in.src = ZXDH_MSG_CHAN_END_VF;
+
+ in.virt_addr = agent_addr;
+ in.payload_addr = p_msg->msg;
+ in.payload_len = p_msg->msg_len;
+ in.dst = ZXDH_MSG_CHAN_END_RISC;
+ in.module_id = ZXDH_BAR_MDOULE_NPSDK;
+
+ recv_buffer = (uint32_t *)rte_zmalloc(NULL, rep_len + ZXDH_CHANNEL_REPS_LEN, 0);
+ if (recv_buffer == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ result.buffer_len = rep_len + ZXDH_CHANNEL_REPS_LEN;
+ result.recv_buffer = recv_buffer;
+
+ ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+ if (ret == ZXDH_BAR_MSG_OK) {
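+ /* Reply layout: byte 0 is the valid flag (0xff), bytes 1-2 hold the
+ * reply length, and the payload starts at offset 4.
+ */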
+ reply_ptr = (uint8_t *)(result.recv_buffer);
+ if (*reply_ptr == 0xff) {
+ reply_msg_len = *(uint16_t *)(reply_ptr + 1);
+ rte_memcpy(p_data, reply_ptr + 4,
+ ((reply_msg_len > rep_len) ? rep_len : reply_msg_len));
+ } else {
+ PMD_DRV_LOG(ERR, "Message not replied");
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "Error[0x%x], %s failed!", ret, __func__);
+ }
+
+ rte_free(recv_buffer);
+ return ret;
+}
+
+static uint32_t
+zxdh_np_agent_channel_plcr_sync_send(ZXDH_AGENT_CHANNEL_PLCR_MSG_T *p_msg,
+ uint32_t *p_data, uint32_t rep_len)
+{
+ uint32_t ret = 0;
+ ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {0};
+
+ agent_msg.msg = (void *)p_msg;
+ agent_msg.msg_len = sizeof(ZXDH_AGENT_CHANNEL_PLCR_MSG_T);
+
+ ret = zxdh_np_agent_channel_sync_send(&agent_msg, p_data, rep_len);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "%s: agent_channel_sync_send failed.", __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
+static uint32_t
+zxdh_np_agent_channel_plcr_profileid_request(uint32_t vport,
+ uint32_t car_type, uint32_t *p_profileid)
+{
+ uint32_t ret = 0;
+ uint32_t resp_buffer[2] = {0};
+
+ ZXDH_AGENT_CHANNEL_PLCR_MSG_T msgcfg = {0};
+
+ msgcfg.dev_id = 0;
+ msgcfg.type = ZXDH_PLCR_MSG;
+ msgcfg.oper = ZXDH_PROFILEID_REQUEST;
+ msgcfg.vport = vport;
+ msgcfg.car_type = car_type;
+ msgcfg.profile_id = 0xFFFF;
+
+ ret = zxdh_np_agent_channel_plcr_sync_send(&msgcfg,
+ resp_buffer, sizeof(resp_buffer));
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "%s: agent_channel_plcr_sync_send failed.", __func__);
+ return 1;
+ }
+
+ rte_memcpy(p_profileid, resp_buffer, sizeof(uint32_t) * ZXDH_SCHE_RSP_LEN);
+
+ return ret;
+}
+
+static uint32_t
+zxdh_np_agent_channel_plcr_car_rate(uint32_t car_type,
+ uint32_t pkt_sign,
+ uint32_t profile_id __rte_unused,
+ void *p_car_profile_cfg)
+{
+ uint32_t ret = 0;
+ uint32_t resp_buffer[2] = {0};
+ uint32_t resp_len = 8;
+ uint32_t i = 0;
+ ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {0};
+ ZXDH_AGENT_CAR_PKT_PROFILE_MSG_T msgpktcfg = {0};
+ ZXDH_AGENT_CAR_PROFILE_MSG_T msgcfg = {0};
+ ZXDH_STAT_CAR_PROFILE_CFG_T *p_stat_car_profile_cfg = NULL;
+ ZXDH_STAT_CAR_PKT_PROFILE_CFG_T *p_stat_pkt_car_profile_cfg = NULL;
+
+ if (car_type == ZXDH_STAT_CAR_A_TYPE && pkt_sign == 1) {
+ p_stat_pkt_car_profile_cfg = (ZXDH_STAT_CAR_PKT_PROFILE_CFG_T *)p_car_profile_cfg;
+ msgpktcfg.dev_id = 0;
+ msgpktcfg.type = ZXDH_PLCR_CAR_PKT_RATE;
+ msgpktcfg.car_level = car_type;
+ msgpktcfg.cir = p_stat_pkt_car_profile_cfg->cir;
+ msgpktcfg.cbs = p_stat_pkt_car_profile_cfg->cbs;
+ msgpktcfg.profile_id = p_stat_pkt_car_profile_cfg->profile_id;
+ msgpktcfg.pkt_sign = p_stat_pkt_car_profile_cfg->pkt_sign;
+ for (i = 0; i < ZXDH_CAR_PRI_MAX; i++)
+ msgpktcfg.pri[i] = p_stat_pkt_car_profile_cfg->pri[i];
+
+ agent_msg.msg = (void *)&msgpktcfg;
+ agent_msg.msg_len = sizeof(ZXDH_AGENT_CAR_PKT_PROFILE_MSG_T);
+
+ ret = zxdh_np_agent_channel_sync_send(&agent_msg, resp_buffer, resp_len);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "%s: stat_car_a_type failed.", __func__);
+ return 1;
+ }
+
+ ret = *(uint8_t *)resp_buffer;
+ } else {
+ p_stat_car_profile_cfg = (ZXDH_STAT_CAR_PROFILE_CFG_T *)p_car_profile_cfg;
+ msgcfg.dev_id = 0;
+ msgcfg.type = ZXDH_PLCR_CAR_RATE;
+ msgcfg.car_level = car_type;
+ msgcfg.cir = p_stat_car_profile_cfg->cir;
+ msgcfg.cbs = p_stat_car_profile_cfg->cbs;
+ msgcfg.profile_id = p_stat_car_profile_cfg->profile_id;
+ msgcfg.pkt_sign = p_stat_car_profile_cfg->pkt_sign;
+ msgcfg.cd = p_stat_car_profile_cfg->cd;
+ msgcfg.cf = p_stat_car_profile_cfg->cf;
+ msgcfg.cm = p_stat_car_profile_cfg->cm;
+ msgcfg.eir = p_stat_car_profile_cfg->eir;
+ msgcfg.ebs = p_stat_car_profile_cfg->ebs;
+ msgcfg.random_disc_e = p_stat_car_profile_cfg->random_disc_e;
+ msgcfg.random_disc_c = p_stat_car_profile_cfg->random_disc_c;
+ for (i = 0; i < ZXDH_CAR_PRI_MAX; i++) {
+ msgcfg.c_pri[i] = p_stat_car_profile_cfg->c_pri[i];
+ msgcfg.e_green_pri[i] = p_stat_car_profile_cfg->e_green_pri[i];
+ msgcfg.e_yellow_pri[i] = p_stat_car_profile_cfg->e_yellow_pri[i];
+ }
+
+ agent_msg.msg = (void *)&msgcfg;
+ agent_msg.msg_len = sizeof(ZXDH_AGENT_CAR_PROFILE_MSG_T);
+
+ ret = zxdh_np_agent_channel_sync_send(&agent_msg, resp_buffer, resp_len);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "%s: stat_car_b_type failed.", __func__);
+ return 1;
+ }
+
+ ret = *(uint8_t *)resp_buffer;
+ }
+
+ return ret;
+}
+
+static uint32_t
+zxdh_np_agent_channel_plcr_profileid_release(uint32_t vport,
+ uint32_t car_type __rte_unused,
+ uint32_t profileid)
+{
+ uint32_t ret = 0;
+ uint32_t resp_buffer[2] = {0};
+
+ ZXDH_AGENT_CHANNEL_PLCR_MSG_T msgcfg = {0};
+
+ msgcfg.dev_id = 0;
+ msgcfg.type = ZXDH_PLCR_MSG;
+ msgcfg.oper = ZXDH_PROFILEID_RELEASE;
+ msgcfg.vport = vport;
+ msgcfg.profile_id = profileid;
+
+ ret = zxdh_np_agent_channel_plcr_sync_send(&msgcfg,
+ resp_buffer, sizeof(resp_buffer));
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "%s: agent_channel_plcr_sync_send failed.", __func__);
+ return 1;
+ }
+
+ ret = *(uint8_t *)resp_buffer;
+
+ return ret;
+}
+
+static uint32_t
+zxdh_np_stat_cara_queue_cfg_set(uint32_t dev_id,
+ uint32_t flow_id,
+ uint32_t drop_flag,
+ uint32_t plcr_en,
+ uint32_t profile_id)
+{
+ uint32_t rc = 0;
+
+ ZXDH_STAT_CAR0_CARA_QUEUE_RAM0_159_0_T queue_cfg = {0};
+
+ queue_cfg.cara_drop = drop_flag;
+ queue_cfg.cara_plcr_en = plcr_en;
+ queue_cfg.cara_profile_id = profile_id;
+
+ rc = zxdh_np_reg_write(dev_id,
+ ZXDH_STAT_CAR0_CARA_QUEUE_RAM0,
+ 0,
+ flow_id,
+ &queue_cfg);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_reg_write");
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_stat_carb_queue_cfg_set(uint32_t dev_id,
+ uint32_t flow_id,
+ uint32_t drop_flag,
+ uint32_t plcr_en,
+ uint32_t profile_id)
+{
+ uint32_t rc = 0;
+
+ ZXDH_STAT_CAR0_CARB_QUEUE_RAM0_159_0_T queue_cfg = {0};
+
+ queue_cfg.carb_drop = drop_flag;
+ queue_cfg.carb_plcr_en = plcr_en;
+ queue_cfg.carb_profile_id = profile_id;
+
+ rc = zxdh_np_reg_write(dev_id,
+ ZXDH_STAT_CAR0_CARB_QUEUE_RAM0,
+ 0,
+ flow_id,
+ &queue_cfg);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_reg_write");
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_stat_carc_queue_cfg_set(uint32_t dev_id,
+ uint32_t flow_id,
+ uint32_t drop_flag,
+ uint32_t plcr_en,
+ uint32_t profile_id)
+{
+ uint32_t rc = 0;
+
+ ZXDH_STAT_CAR0_CARC_QUEUE_RAM0_159_0_T queue_cfg = {0};
+
+ queue_cfg.carc_drop = drop_flag;
+ queue_cfg.carc_plcr_en = plcr_en;
+ queue_cfg.carc_profile_id = profile_id;
+
+ rc = zxdh_np_reg_write(dev_id,
+ ZXDH_STAT_CAR0_CARC_QUEUE_RAM0,
+ 0,
+ flow_id,
+ &queue_cfg);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_reg_write");
+
+ return rc;
+}
+
+uint32_t
+zxdh_np_car_profile_id_add(uint32_t vport_id,
+ uint32_t flags,
+ uint64_t *p_profile_id)
+{
+ uint32_t ret = 0;
+ uint32_t *profile_id = NULL;
+ uint32_t profile_id_h = 0;
+ uint32_t profile_id_l = 0;
+ uint64_t temp_profile_id = 0;
+
+ profile_id = (uint32_t *)rte_zmalloc(NULL, ZXDH_G_PROFILE_ID_LEN, 0);
+ if (profile_id == NULL) {
+ PMD_DRV_LOG(ERR, "%s: profile_id point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ ret = zxdh_np_agent_channel_plcr_profileid_request(vport_id, flags, profile_id);
+
+ profile_id_h = *(profile_id + 1);
+ profile_id_l = *profile_id;
+ rte_free(profile_id);
+
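+ /* The agent reports the profile id as two 32-bit words; assemble them
+ * into a 64-bit value and reject anything outside the expected range.
+ */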
+ temp_profile_id = (((uint64_t)profile_id_l) << 32) | ((uint64_t)profile_id_h);
+ if ((temp_profile_id >> 56) != 0) {
+ PMD_DRV_LOG(ERR, "%s: profile_id is overflow!", __func__);
+ return 1;
+ }
+
+ *p_profile_id = temp_profile_id;
+
+ return ret;
+}
+
+uint32_t
+zxdh_np_car_profile_cfg_set(uint32_t vport_id __rte_unused,
+ uint32_t car_type,
+ uint32_t pkt_sign,
+ uint32_t profile_id,
+ void *p_car_profile_cfg)
+{
+ uint32_t ret = 0;
+
+ ret = zxdh_np_agent_channel_plcr_car_rate(car_type,
+ pkt_sign, profile_id, p_car_profile_cfg);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "%s: plcr_car_rate set failed!", __func__);
+ return 1;
+ }
+
+ return ret;
+}
+
+uint32_t
+zxdh_np_car_profile_id_delete(uint32_t vport_id,
+ uint32_t flags, uint64_t profile_id)
+{
+ uint32_t ret = 0;
+ uint32_t profileid = 0;
+
+ profileid = profile_id & 0xFFFF;
+
+ ret = zxdh_np_agent_channel_plcr_profileid_release(vport_id, flags, profileid);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "%s: plcr profiled id release failed!", __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
+uint32_t
+zxdh_np_stat_car_queue_cfg_set(uint32_t dev_id,
+ uint32_t car_type,
+ uint32_t flow_id,
+ uint32_t drop_flag,
+ uint32_t plcr_en,
+ uint32_t profile_id)
+{
+ uint32_t rc = 0;
+
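+ /* Each CAR level has its own flow id and profile id limits; validate
+ * them before touching the queue RAM.
+ */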
+ if (car_type == ZXDH_STAT_CAR_A_TYPE) {
+ if (flow_id > ZXDH_CAR_A_FLOW_ID_MAX) {
+ PMD_DRV_LOG(ERR, "%s: stat car a type flow_id invalid!", __func__);
+ return ZXDH_PAR_CHK_INVALID_INDEX;
+ }
+
+ if (profile_id > ZXDH_CAR_A_PROFILE_ID_MAX) {
+ PMD_DRV_LOG(ERR, "%s: stat car a type profile_id invalid!", __func__);
+ return ZXDH_PAR_CHK_INVALID_INDEX;
+ }
+ } else if (car_type == ZXDH_STAT_CAR_B_TYPE) {
+ if (flow_id > ZXDH_CAR_B_FLOW_ID_MAX) {
+ PMD_DRV_LOG(ERR, "%s: stat car b type flow_id invalid!", __func__);
+ return ZXDH_PAR_CHK_INVALID_INDEX;
+ }
+
+ if (profile_id > ZXDH_CAR_B_PROFILE_ID_MAX) {
+ PMD_DRV_LOG(ERR, "%s: stat car b type profile_id invalid!", __func__);
+ return ZXDH_PAR_CHK_INVALID_INDEX;
+ }
+ } else {
+ if (flow_id > ZXDH_CAR_C_FLOW_ID_MAX) {
+ PMD_DRV_LOG(ERR, "%s: stat car c type flow_id invalid!", __func__);
+ return ZXDH_PAR_CHK_INVALID_INDEX;
+ }
+
+ if (profile_id > ZXDH_CAR_C_PROFILE_ID_MAX) {
+ PMD_DRV_LOG(ERR, "%s: stat car c type profile_id invalid!", __func__);
+ return ZXDH_PAR_CHK_INVALID_INDEX;
+ }
+ }
+
+ switch (car_type) {
+ case ZXDH_STAT_CAR_A_TYPE:
+ rc = zxdh_np_stat_cara_queue_cfg_set(dev_id,
+ flow_id,
+ drop_flag,
+ plcr_en,
+ profile_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "stat_cara_queue_cfg_set");
+ break;
+
+ case ZXDH_STAT_CAR_B_TYPE:
+ rc = zxdh_np_stat_carb_queue_cfg_set(dev_id,
+ flow_id,
+ drop_flag,
+ plcr_en,
+ profile_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "stat_carb_queue_cfg_set");
+ break;
+
+ case ZXDH_STAT_CAR_C_TYPE:
+ rc = zxdh_np_stat_carc_queue_cfg_set(dev_id,
+ flow_id,
+ drop_flag,
+ plcr_en,
+ profile_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "stat_carc_queue_cfg_set");
+ break;
+ }
+
+ return rc;
+}
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
index d793189657..63ebd12c18 100644
--- a/drivers/net/zxdh/zxdh_np.h
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -147,6 +147,43 @@
#define ZXDH_RC_DTB_SEARCH_VPORT_QUEUE_ZERO (ZXDH_RC_DTB_BASE | 0x17)
#define ZXDH_RC_DTB_QUEUE_NOT_ENABLE (ZXDH_RC_DTB_BASE | 0x18)
+#define ZXDH_SCHE_RSP_LEN (2)
+#define ZXDH_G_PROFILE_ID_LEN (8)
+
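+/* Flow id and profile id upper bounds for CAR levels A, B and C. */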
+#define ZXDH_CAR_A_FLOW_ID_MAX (0x7fff)
+#define ZXDH_CAR_B_FLOW_ID_MAX (0xfff)
+#define ZXDH_CAR_C_FLOW_ID_MAX (0x3ff)
+#define ZXDH_CAR_A_PROFILE_ID_MAX (0x1ff)
+#define ZXDH_CAR_B_PROFILE_ID_MAX (0x7f)
+#define ZXDH_CAR_C_PROFILE_ID_MAX (0x1f)
+
+#define ZXDH_SYS_NP_BASE_ADDR0 (0x00000000)
+#define ZXDH_SYS_NP_BASE_ADDR1 (0x02000000)
+
+#define ZXDH_FIELD_FLAG_RO (1 << 0)
+#define ZXDH_FIELD_FLAG_RW (1 << 1)
+
+#define ZXDH_VF_ACTIVE(VPORT) (((VPORT) & 0x0800) >> 11)
+#define ZXDH_EPID_BY(VPORT) (((VPORT) & 0x7000) >> 12)
+#define ZXDH_FUNC_NUM(VPORT) (((VPORT) & 0x0700) >> 8)
+#define ZXDH_VFUNC_NUM(VPORT) (((VPORT) & 0x00FF))
+#define ZXDH_IS_PF(VPORT) (!ZXDH_VF_ACTIVE(VPORT))
+
+#define ZXDH_CHANNEL_REPS_LEN (4)
+
+typedef enum zxdh_module_base_addr_e {
+ ZXDH_MODULE_SE_SMMU0_BASE_ADDR = 0x00000000,
+ ZXDH_MODULE_DTB_ENQ_BASE_ADDR = 0x00000000,
+ ZXDH_MODULE_NPPU_PKTRX_CFG_BASE_ADDR = 0x00000800,
+} ZXDH_MODULE_BASE_ADDR_E;
+
+typedef enum zxdh_sys_base_addr_e {
+ ZXDH_SYS_NPPU_BASE_ADDR = (ZXDH_SYS_NP_BASE_ADDR0 + 0x00000000),
+ ZXDH_SYS_SE_SMMU0_BASE_ADDR = (ZXDH_SYS_NP_BASE_ADDR0 + 0x00300000),
+ ZXDH_SYS_DTB_BASE_ADDR = (ZXDH_SYS_NP_BASE_ADDR1 + 0x00000000),
+ ZXDH_SYS_MAX_BASE_ADDR = 0x20000000,
+} ZXDH_SYS_BASE_ADDR_E;
+
typedef enum zxdh_module_init_e {
ZXDH_MODULE_INIT_NPPU = 0,
ZXDH_MODULE_INIT_PPU,
@@ -173,6 +210,10 @@ typedef enum zxdh_reg_info_e {
ZXDH_DTB_CFG_QUEUE_DTB_LEN = 2,
ZXDH_DTB_INFO_QUEUE_BUF_SPACE = 3,
ZXDH_DTB_CFG_EPID_V_FUNC_NUM = 4,
+ ZXDH_STAT_CAR0_CARA_QUEUE_RAM0 = 9,
+ ZXDH_STAT_CAR0_CARB_QUEUE_RAM0 = 10,
+ ZXDH_STAT_CAR0_CARC_QUEUE_RAM0 = 11,
+ ZXDH_NPPU_PKTRX_CFG_GLBAL_CFG_0R = 12,
ZXDH_REG_ENUM_MAX_VALUE
} ZXDH_REG_INFO_E;
@@ -597,6 +638,170 @@ typedef enum zxdh_se_opr_mode_e {
ZXDH_SE_OPR_WR = 1,
} ZXDH_SE_OPR_MODE_E;
+typedef enum zxdh_stat_car_type_e {
+ ZXDH_STAT_CAR_A_TYPE = 0,
+ ZXDH_STAT_CAR_B_TYPE,
+ ZXDH_STAT_CAR_C_TYPE,
+ ZXDH_STAT_CAR_MAX_TYPE
+} ZXDH_STAT_CAR_TYPE_E;
+
+typedef enum zxdh_car_priority_e {
+ ZXDH_CAR_PRI0 = 0,
+ ZXDH_CAR_PRI1 = 1,
+ ZXDH_CAR_PRI2 = 2,
+ ZXDH_CAR_PRI3 = 3,
+ ZXDH_CAR_PRI4 = 4,
+ ZXDH_CAR_PRI5 = 5,
+ ZXDH_CAR_PRI6 = 6,
+ ZXDH_CAR_PRI7 = 7,
+ ZXDH_CAR_PRI_MAX
+} ZXDH_CAR_PRIORITY_E;
+
+typedef struct zxdh_stat_car_pkt_profile_cfg_t {
+ uint32_t profile_id;
+ uint32_t pkt_sign;
+ uint32_t cir;
+ uint32_t cbs;
+ uint32_t pri[ZXDH_CAR_PRI_MAX];
+} ZXDH_STAT_CAR_PKT_PROFILE_CFG_T;
+
+typedef struct zxdh_stat_car_profile_cfg_t {
+ uint32_t profile_id;
+ uint32_t pkt_sign;
+ uint32_t cd;
+ uint32_t cf;
+ uint32_t cm;
+ uint32_t cir;
+ uint32_t cbs;
+ uint32_t eir;
+ uint32_t ebs;
+ uint32_t random_disc_e;
+ uint32_t random_disc_c;
+ uint32_t c_pri[ZXDH_CAR_PRI_MAX];
+ uint32_t e_green_pri[ZXDH_CAR_PRI_MAX];
+ uint32_t e_yellow_pri[ZXDH_CAR_PRI_MAX];
+} ZXDH_STAT_CAR_PROFILE_CFG_T;
+
+typedef struct zxdh_stat_car0_cara_queue_ram0_159_0_t {
+ uint32_t cara_drop;
+ uint32_t cara_plcr_en;
+ uint32_t cara_profile_id;
+ uint32_t cara_tq_h;
+ uint32_t cara_tq_l;
+ uint32_t cara_ted;
+ uint32_t cara_tcd;
+ uint32_t cara_tei;
+ uint32_t cara_tci;
+} ZXDH_STAT_CAR0_CARA_QUEUE_RAM0_159_0_T;
+
+typedef struct zxdh_agent_car_profile_msg {
+ uint8_t dev_id;
+ uint8_t type;
+ uint8_t rsv;
+ uint8_t rsv1;
+ uint32_t car_level;
+ uint32_t profile_id;
+ uint32_t pkt_sign;
+ uint32_t cd;
+ uint32_t cf;
+ uint32_t cm;
+ uint32_t cir;
+ uint32_t cbs;
+ uint32_t eir;
+ uint32_t ebs;
+ uint32_t random_disc_e;
+ uint32_t random_disc_c;
+ uint32_t c_pri[ZXDH_CAR_PRI_MAX];
+ uint32_t e_green_pri[ZXDH_CAR_PRI_MAX];
+ uint32_t e_yellow_pri[ZXDH_CAR_PRI_MAX];
+} ZXDH_AGENT_CAR_PROFILE_MSG_T;
+
+typedef struct zxdh_agent_car_pkt_profile_msg {
+ uint8_t dev_id;
+ uint8_t type;
+ uint8_t rsv;
+ uint8_t rsv1;
+ uint32_t car_level;
+ uint32_t profile_id;
+ uint32_t pkt_sign;
+ uint32_t cir;
+ uint32_t cbs;
+ uint32_t pri[ZXDH_CAR_PRI_MAX];
+} ZXDH_AGENT_CAR_PKT_PROFILE_MSG_T;
+
+typedef struct zxdh_agent_channel_msg_t {
+ uint32_t msg_len;
+ void *msg;
+} ZXDH_AGENT_CHANNEL_MSG_T;
+
+typedef struct zxdh_agent_channel_plcr_msg {
+ uint8_t dev_id;
+ uint8_t type;
+ uint8_t oper;
+ uint8_t rsv;
+ uint32_t vport;
+ uint32_t car_type;
+ uint32_t profile_id;
+} ZXDH_AGENT_CHANNEL_PLCR_MSG_T;
+
+typedef struct zxdh_stat_car0_carc_queue_ram0_159_0_t {
+ uint32_t carc_drop;
+ uint32_t carc_plcr_en;
+ uint32_t carc_profile_id;
+ uint32_t carc_tq_h;
+ uint32_t carc_tq_l;
+ uint32_t carc_ted;
+ uint32_t carc_tcd;
+ uint32_t carc_tei;
+ uint32_t carc_tci;
+} ZXDH_STAT_CAR0_CARC_QUEUE_RAM0_159_0_T;
+
+typedef struct zxdh_stat_car0_carb_queue_ram0_159_0_t {
+ uint32_t carb_drop;
+ uint32_t carb_plcr_en;
+ uint32_t carb_profile_id;
+ uint32_t carb_tq_h;
+ uint32_t carb_tq_l;
+ uint32_t carb_ted;
+ uint32_t carb_tcd;
+ uint32_t carb_tei;
+ uint32_t carb_tci;
+} ZXDH_STAT_CAR0_CARB_QUEUE_RAM0_159_0_T;
+
+typedef enum zxdh_np_agent_msg_type_e {
+ ZXDH_REG_MSG = 0,
+ ZXDH_DTB_MSG,
+ ZXDH_TM_MSG,
+ ZXDH_PLCR_MSG,
+ ZXDH_PKTRX_IND_REG_RW_MSG,
+ ZXDH_PCIE_BAR_MSG,
+ ZXDH_RESET_MSG,
+ ZXDH_PXE_MSG,
+ ZXDH_TM_FLOW_SHAPE,
+ ZXDH_TM_TD,
+ ZXDH_TM_SE_SHAPE,
+ ZXDH_TM_PP_SHAPE,
+ ZXDH_PLCR_CAR_RATE,
+ ZXDH_PLCR_CAR_PKT_RATE,
+ ZXDH_PPU_THASH_RSK,
+ ZXDH_ACL_MSG,
+ ZXDH_STAT_MSG,
+ ZXDH_RES_MSG,
+ ZXDH_MSG_MAX
+} MSG_TYPE_E;
+
+typedef enum zxdh_msg_plcr_oper {
+ ZXDH_PROFILEID_REQUEST = 0,
+ ZXDH_PROFILEID_RELEASE = 1,
+} ZXDH_MSG_PLCR_OPER_E;
+
+typedef enum zxdh_profile_type {
+ CAR_A = 0,
+ CAR_B = 1,
+ CAR_C = 2,
+ CAR_MAX
+} ZXDH_PROFILE_TYPE;
+
int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl);
int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id);
int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id,
@@ -615,5 +820,22 @@ uint32_t zxdh_np_stat_ppu_cnt_get_ex(uint32_t dev_id,
uint32_t index,
uint32_t clr_mode,
uint32_t *p_data);
+uint32_t zxdh_np_car_profile_id_add(uint32_t vport_id,
+ uint32_t flags,
+ uint64_t *p_profile_id);
+uint32_t zxdh_np_car_profile_cfg_set(uint32_t vport_id,
+ uint32_t car_type,
+ uint32_t pkt_sign,
+ uint32_t profile_id,
+ void *p_car_profile_cfg);
+uint32_t zxdh_np_car_profile_id_delete(uint32_t vport_id,
+ uint32_t flags, uint64_t profile_id);
+uint32_t zxdh_np_stat_car_queue_cfg_set(uint32_t dev_id,
+ uint32_t car_type,
+ uint32_t flow_id,
+ uint32_t drop_flag,
+ uint32_t plcr_en,
+ uint32_t profile_id);
#endif /* ZXDH_NP_H */
diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h
index 542cee5e49..37be4cf71e 100644
--- a/drivers/net/zxdh/zxdh_tables.h
+++ b/drivers/net/zxdh/zxdh_tables.h
@@ -64,6 +64,9 @@
#define ZXDH_FLOW_STATS_INGRESS_BASE 0xADC1
+#define ZXDH_MTR_STATS_EGRESS_BASE 0x7481
+#define ZXDH_MTR_STATS_INGRESS_BASE 0x7C81
+
extern struct zxdh_dtb_shared_data g_dtb_data;
struct zxdh_port_vlan_table {
--
2.27.0