From: "WanRenyong" <wanry@yunsilicon.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@amd.com>, <thomas@monjalon.net>,
<andrew.rybchenko@oktetlabs.ru>, <qianr@yunsilicon.com>,
<nana@yunsilicon.com>, <zhangxx@yunsilicon.com>,
<zhangxx@yunsilicon.com>, <xudw@yunsilicon.com>,
<jacky@yunsilicon.com>, <weihg@yunsilicon.com>
Subject: [PATCH v4 05/15] net/xsc: add PCT interfaces
Date: Fri, 03 Jan 2025 23:04:15 +0800 [thread overview]
Message-ID: <20250103150413.1529663-6-wanry@yunsilicon.com> (raw)
In-Reply-To: <20250103150404.1529663-1-wanry@yunsilicon.com>
PCT is the abbreviation of packet classifier table, which is built
in the NP (network processor) to define the behavior of various packets.
Signed-off-by: WanRenyong <wanry@yunsilicon.com>
---
drivers/net/xsc/meson.build | 1 +
drivers/net/xsc/xsc_defs.h | 29 +++
drivers/net/xsc/xsc_dev.c | 19 +-
drivers/net/xsc/xsc_dev.h | 3 +
drivers/net/xsc/xsc_np.c | 492 ++++++++++++++++++++++++++++++++++++
drivers/net/xsc/xsc_np.h | 154 +++++++++++
6 files changed, 697 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/xsc/xsc_np.c
create mode 100644 drivers/net/xsc/xsc_np.h
diff --git a/drivers/net/xsc/meson.build b/drivers/net/xsc/meson.build
index 4e20b30438..5ee03ea835 100644
--- a/drivers/net/xsc/meson.build
+++ b/drivers/net/xsc/meson.build
@@ -11,4 +11,5 @@ sources = files(
'xsc_dev.c',
'xsc_vfio_mbox.c',
'xsc_vfio.c',
+ 'xsc_np.c',
)
diff --git a/drivers/net/xsc/xsc_defs.h b/drivers/net/xsc/xsc_defs.h
index 8fd59133bc..b1e37a5870 100644
--- a/drivers/net/xsc/xsc_defs.h
+++ b/drivers/net/xsc/xsc_defs.h
@@ -16,6 +16,26 @@
#define XSC_VFREP_BASE_LOGICAL_PORT 1081
+/* RSS hash key length in bytes. */
+#define XSC_RSS_HASH_KEY_LEN 52
+/* Packet-field bits combined into an NP RSS hash template
+ * (see xsc_rss_hash_tmplate[] in xsc_np.c).
+ */
+#define XSC_RSS_HASH_BIT_IPV4_SIP (1ULL << 0)
+#define XSC_RSS_HASH_BIT_IPV4_DIP (1ULL << 1)
+#define XSC_RSS_HASH_BIT_IPV6_SIP (1ULL << 2)
+#define XSC_RSS_HASH_BIT_IPV6_DIP (1ULL << 3)
+#define XSC_RSS_HASH_BIT_IPV4_SPORT (1ULL << 4)
+#define XSC_RSS_HASH_BIT_IPV4_DPORT (1ULL << 5)
+#define XSC_RSS_HASH_BIT_IPV6_SPORT (1ULL << 6)
+#define XSC_RSS_HASH_BIT_IPV6_DPORT (1ULL << 7)
+#define XSC_RSS_HASH_BIT_TNL_ID (1ULL << 8)
+#define XSC_RSS_HASH_BIT_NXT_PRO (1ULL << 9)
+
+/* EPAT modify flags: presumably each bit selects which EPAT action field
+ * firmware should apply (set together with the field in
+ * xsc_dev_vf_modify_epat()) -- confirm against the firmware ABI.
+ */
+#define XSC_EPAT_VLD_FLAG (1ULL)
+#define XSC_EPAT_RX_QP_ID_OFST_FLAG (1ULL << 2)
+#define XSC_EPAT_QP_NUM_FLAG (1ULL << 3)
+#define XSC_EPAT_RSS_EN_FLAG (1ULL << 4)
+#define XSC_EPAT_RSS_HASH_TEMPLATE_FLAG (1ULL << 5)
+#define XSC_EPAT_RSS_HASH_FUNC_FLAG (1ULL << 6)
+#define XSC_EPAT_HAS_PPH_FLAG (1ULL << 9)
+
+
#define XSC_PF_TX_DB_ADDR 0x4802000
#define XSC_PF_RX_DB_ADDR 0x4804000
#define XSC_PF_CQ_DB_ADDR 0x2120000
@@ -38,4 +58,13 @@ enum xsc_pph_type {
XSC_UPLINK_PPH = 0x8,
};
+/* Role of a logical port within the device. */
+enum xsc_port_type {
+	XSC_PORT_TYPE_NONE = 0,
+	XSC_PORT_TYPE_UPLINK,
+	XSC_PORT_TYPE_UPLINK_BOND,
+	XSC_PORT_TYPE_PFVF,
+	XSC_PORT_TYPE_PFHPF,
+	XSC_PORT_TYPE_UNKNOWN,
+};
+
#endif /* XSC_DEFS_H_ */
diff --git a/drivers/net/xsc/xsc_dev.c b/drivers/net/xsc/xsc_dev.c
index 1b8a84baa6..02c6346b45 100644
--- a/drivers/net/xsc/xsc_dev.c
+++ b/drivers/net/xsc/xsc_dev.c
@@ -54,8 +54,17 @@ xsc_dev_ops_register(struct xsc_dev_ops *new_ops)
}
int
-xsc_dev_close(struct xsc_dev *xdev, int __rte_unused repr_id)
+xsc_dev_mailbox_exec(struct xsc_dev *xdev, void *data_in,
+ int in_len, void *data_out, int out_len)
{
+ return xdev->dev_ops->mailbox_exec(xdev, data_in, in_len,
+ data_out, out_len);
+}
+
+int
+xsc_dev_close(struct xsc_dev *xdev, int repr_id)
+{
+ xsc_dev_clear_pct(xdev, repr_id);
return xdev->dev_ops->dev_close(xdev);
}
@@ -121,6 +130,7 @@ void
xsc_dev_uninit(struct xsc_dev *xdev)
{
PMD_INIT_FUNC_TRACE();
+ xsc_dev_pct_uninit();
xsc_dev_close(xdev, XSC_DEV_REPR_ID_INVALID);
rte_free(xdev);
}
@@ -159,6 +169,13 @@ xsc_dev_init(struct rte_pci_device *pci_dev, struct xsc_dev **xdev)
goto hwinfo_init_fail;
}
+ ret = xsc_dev_pct_init();
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to init xsc pct");
+ ret = -EINVAL;
+ goto hwinfo_init_fail;
+ }
+
*xdev = d;
return 0;
diff --git a/drivers/net/xsc/xsc_dev.h b/drivers/net/xsc/xsc_dev.h
index deeddeb7f1..60762c84de 100644
--- a/drivers/net/xsc/xsc_dev.h
+++ b/drivers/net/xsc/xsc_dev.h
@@ -15,6 +15,7 @@
#include "xsc_defs.h"
#include "xsc_log.h"
#include "xsc_rxtx.h"
+#include "xsc_np.h"
#define XSC_PPH_MODE_ARG "pph_mode"
#define XSC_NIC_MODE_ARG "nic_mode"
@@ -154,6 +155,8 @@ struct xsc_dev_ops {
int in_len, void *data_out, int out_len);
};
+int xsc_dev_mailbox_exec(struct xsc_dev *xdev, void *data_in,
+ int in_len, void *data_out, int out_len);
void xsc_dev_ops_register(struct xsc_dev_ops *new_ops);
int xsc_dev_init(struct rte_pci_device *pci_dev, struct xsc_dev **dev);
void xsc_dev_uninit(struct xsc_dev *xdev);
diff --git a/drivers/net/xsc/xsc_np.c b/drivers/net/xsc/xsc_np.c
new file mode 100644
index 0000000000..d4eb833bf6
--- /dev/null
+++ b/drivers/net/xsc/xsc_np.c
@@ -0,0 +1,492 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#include <rte_bitmap.h>
+#include <rte_malloc.h>
+
+#include "xsc_log.h"
+#include "xsc_defs.h"
+#include "xsc_np.h"
+#include "xsc_cmd.h"
+#include "xsc_dev.h"
+
+/* RSS hash function id programmed into EPAT entries (Toeplitz). */
+#define XSC_RSS_HASH_FUNC_TOPELIZ 0x1
+/* Logical port ids are 11 bits wide. */
+#define XSC_LOGIC_PORT_MASK 0x07FF
+
+/* Inclusive range of PCT indexes reserved for the PMD's default rules. */
+#define XSC_DEV_DEF_PCT_IDX_MIN 128
+#define XSC_DEV_DEF_PCT_IDX_MAX 138
+
+/* Each board has a PCT manager */
+static struct xsc_dev_pct_mgr xsc_pct_mgr;
+
+/* NP table ids; the numeric values are part of the firmware ABI. */
+enum xsc_np_type {
+	XSC_NP_IPAT = 0,
+	XSC_NP_PCT_V4 = 4,
+	XSC_NP_EPAT = 19,
+	XSC_NP_VFOS = 31,
+	XSC_NP_PG_QP_SET_ID = 41,
+	XSC_NP_MAX
+};
+
+/* Operations on an NP table. */
+enum xsc_np_opcode {
+	XSC_NP_OP_ADD,
+	XSC_NP_OP_DEL,
+	XSC_NP_OP_GET,
+	XSC_NP_OP_CLR,
+	XSC_NP_OP_MOD,
+	XSC_NP_OP_MAX
+};
+
+/* Mailbox request: header, big-endian payload length, then payload. */
+struct xsc_np_mbox_in {
+	struct xsc_cmd_inbox_hdr hdr;
+	rte_be16_t len;
+	rte_be16_t rsvd;
+	uint8_t data[];
+};
+
+/* Mailbox reply: header, status, big-endian length, then payload. */
+struct xsc_np_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	rte_be32_t error;
+	rte_be16_t len;
+	rte_be16_t rsvd;
+	uint8_t data[];
+};
+
+/* TL header framing every NP command payload (table/opmod/length). */
+struct xsc_np_data_tl {
+	uint16_t table;
+	uint16_t opmod;
+	uint16_t length;
+	uint16_t rsvd;
+};
+
+/* Indexes into xsc_rss_hash_tmplate[] below; order must match. */
+enum xsc_hash_tmpl {
+	XSC_HASH_TMPL_IDX_IP_PORTS_IP6_PORTS = 0,
+	XSC_HASH_TMPL_IDX_IP_IP6,
+	XSC_HASH_TMPL_IDX_IP_PORTS_IP6,
+	XSC_HASH_TMPL_IDX_IP_IP6_PORTS,
+	XSC_HASH_TMPL_IDX_MAX,
+};
+
+/* Field sets for each hash template, in enum xsc_hash_tmpl order. */
+static const int xsc_rss_hash_tmplate[XSC_HASH_TMPL_IDX_MAX] = {
+	XSC_RSS_HASH_BIT_IPV4_SIP | XSC_RSS_HASH_BIT_IPV4_DIP |
+	XSC_RSS_HASH_BIT_IPV6_SIP | XSC_RSS_HASH_BIT_IPV6_DIP |
+	XSC_RSS_HASH_BIT_IPV4_SPORT | XSC_RSS_HASH_BIT_IPV4_DPORT |
+	XSC_RSS_HASH_BIT_IPV6_SPORT | XSC_RSS_HASH_BIT_IPV6_DPORT,
+
+	XSC_RSS_HASH_BIT_IPV4_SIP | XSC_RSS_HASH_BIT_IPV4_DIP |
+	XSC_RSS_HASH_BIT_IPV6_SIP | XSC_RSS_HASH_BIT_IPV6_DIP,
+
+	XSC_RSS_HASH_BIT_IPV4_SIP | XSC_RSS_HASH_BIT_IPV4_DIP |
+	XSC_RSS_HASH_BIT_IPV6_SIP | XSC_RSS_HASH_BIT_IPV6_DIP |
+	XSC_RSS_HASH_BIT_IPV4_SPORT | XSC_RSS_HASH_BIT_IPV4_DPORT,
+
+	XSC_RSS_HASH_BIT_IPV4_SIP | XSC_RSS_HASH_BIT_IPV4_DIP |
+	XSC_RSS_HASH_BIT_IPV6_SIP | XSC_RSS_HASH_BIT_IPV6_DIP |
+	XSC_RSS_HASH_BIT_IPV6_SPORT | XSC_RSS_HASH_BIT_IPV6_DPORT,
+};
+
+/* Map an ethdev rss_hf configuration onto one of the fixed NP hash
+ * templates.  The returned value encodes (template_index << 1) | outer,
+ * where outer=1 hashes on outermost headers and outer=0 on innermost.
+ * A field combination that matches no template falls back to index 0.
+ */
+static uint8_t
+xsc_rss_hash_template_get(struct rte_eth_rss_conf *rss_conf)
+{
+	int rss_hf = 0;
+	int i = 0;
+	uint8_t idx = 0;
+	uint8_t outer = 1;	/* default: hash on outermost headers */
+
+	/* Base field sets: L3 addresses and L4 ports for IPv4 and IPv6. */
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IP) {
+		rss_hf |= XSC_RSS_HASH_BIT_IPV4_SIP;
+		rss_hf |= XSC_RSS_HASH_BIT_IPV4_DIP;
+		rss_hf |= XSC_RSS_HASH_BIT_IPV6_SIP;
+		rss_hf |= XSC_RSS_HASH_BIT_IPV6_DIP;
+	}
+
+	if ((rss_conf->rss_hf & RTE_ETH_RSS_UDP) ||
+	    (rss_conf->rss_hf & RTE_ETH_RSS_TCP)) {
+		rss_hf |= XSC_RSS_HASH_BIT_IPV4_SPORT;
+		rss_hf |= XSC_RSS_HASH_BIT_IPV4_DPORT;
+		rss_hf |= XSC_RSS_HASH_BIT_IPV6_SPORT;
+		rss_hf |= XSC_RSS_HASH_BIT_IPV6_DPORT;
+	}
+
+	/* SRC/DST_ONLY modifiers drop the opposite direction's field. */
+	if (rss_conf->rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) {
+		rss_hf |= XSC_RSS_HASH_BIT_IPV4_SIP;
+		rss_hf |= XSC_RSS_HASH_BIT_IPV6_SIP;
+		rss_hf &= ~XSC_RSS_HASH_BIT_IPV4_DIP;
+		rss_hf &= ~XSC_RSS_HASH_BIT_IPV6_DIP;
+	}
+
+	if (rss_conf->rss_hf & RTE_ETH_RSS_L3_DST_ONLY) {
+		rss_hf |= XSC_RSS_HASH_BIT_IPV4_DIP;
+		rss_hf |= XSC_RSS_HASH_BIT_IPV6_DIP;
+		rss_hf &= ~XSC_RSS_HASH_BIT_IPV4_SIP;
+		rss_hf &= ~XSC_RSS_HASH_BIT_IPV6_SIP;
+	}
+
+	if (rss_conf->rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) {
+		rss_hf |= XSC_RSS_HASH_BIT_IPV4_SPORT;
+		rss_hf |= XSC_RSS_HASH_BIT_IPV6_SPORT;
+		rss_hf &= ~XSC_RSS_HASH_BIT_IPV4_DPORT;
+		rss_hf &= ~XSC_RSS_HASH_BIT_IPV6_DPORT;
+	}
+
+	if (rss_conf->rss_hf & RTE_ETH_RSS_L4_DST_ONLY) {
+		rss_hf |= XSC_RSS_HASH_BIT_IPV4_DPORT;
+		rss_hf |= XSC_RSS_HASH_BIT_IPV6_DPORT;
+		rss_hf &= ~XSC_RSS_HASH_BIT_IPV4_SPORT;
+		rss_hf &= ~XSC_RSS_HASH_BIT_IPV6_SPORT;
+	}
+
+	if ((rss_conf->rss_hf & RTE_ETH_RSS_LEVEL_PMD_DEFAULT) ||
+	    (rss_conf->rss_hf & RTE_ETH_RSS_LEVEL_OUTERMOST))
+		outer = 1;
+
+	if (rss_conf->rss_hf & RTE_ETH_RSS_LEVEL_INNERMOST)
+		outer = 0;
+
+	/* Find the template whose field set matches exactly. */
+	for (i = 0; i < XSC_HASH_TMPL_IDX_MAX; i++) {
+		if (xsc_rss_hash_tmplate[i] == rss_hf) {
+			idx = i;
+			break;
+		}
+	}
+
+	idx = (idx << 1) | outer;
+	return idx;
+}
+
+/* Build a TLV-framed command for NP table @table with opcode @opmod and
+ * execute it synchronously through the device mailbox.  @cmd/@len is the
+ * table-specific payload, copied after the TL header; the same buffer is
+ * reused for the reply.  Returns the mailbox status, or -ENOMEM (also set
+ * in rte_errno) if the command buffer cannot be allocated.
+ *
+ * NOTE(review): the reply payload is not copied back into @cmd, so
+ * GET-style callers (e.g. xsc_dev_get_ipat_vld) only ever see their own
+ * zero-initialized struct -- confirm whether out->data should be copied
+ * back for XSC_NP_OP_GET.
+ */
+static int
+xsc_dev_np_exec(struct xsc_dev *xdev, void *cmd, int len, int table, int opmod)
+{
+	struct xsc_np_data_tl *tl;
+	struct xsc_np_mbox_in *in;
+	struct xsc_np_mbox_out *out;
+	int in_len;
+	int out_len;
+	int data_len;
+	int cmd_len;
+	int ret;
+
+	data_len = sizeof(struct xsc_np_data_tl) + len;
+	in_len = sizeof(struct xsc_np_mbox_in) + data_len;
+	out_len = sizeof(struct xsc_np_mbox_out) + data_len;
+	cmd_len = RTE_MAX(in_len, out_len);
+	/* Check the allocation before touching the buffer: the original
+	 * memset ran before the NULL check and dereferenced NULL on OOM.
+	 */
+	in = malloc(cmd_len);
+	if (in == NULL) {
+		rte_errno = ENOMEM;
+		PMD_DRV_LOG(ERR, "Failed to alloc np cmd memory");
+		return -rte_errno;
+	}
+	memset(in, 0, cmd_len);
+
+	in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_EXEC_NP);
+	in->len = rte_cpu_to_be_16(data_len);
+
+	/* NOTE(review): hdr fields are big-endian but the TL fields are
+	 * written in host order -- confirm this matches the firmware ABI.
+	 */
+	tl = (struct xsc_np_data_tl *)in->data;
+	tl->length = len;
+	tl->table = table;
+	tl->opmod = opmod;
+	if (cmd && len)
+		memcpy(tl + 1, cmd, len);
+
+	/* Reuse the request buffer for the reply; cmd_len covers both. */
+	out = (struct xsc_np_mbox_out *)in;
+	ret = xsc_dev_mailbox_exec(xdev, in, in_len, out, out_len);
+
+	free(in);
+	return ret;
+}
+
+/* Install a PCT rule matching @logical_in_port that steers to @dst_info,
+ * and record it on the representor's default-rule list so that
+ * xsc_dev_clear_pct() can tear it down later.
+ * Returns 0 on success, -1 on failure (index exhausted, mailbox error,
+ * or out of memory for the tracking entry).
+ */
+int
+xsc_dev_create_pct(struct xsc_dev *xdev, int repr_id,
+		   uint16_t logical_in_port, uint16_t dst_info)
+{
+	int ret;
+	struct xsc_np_pct_v4_add add;
+	struct xsc_repr_port *repr = &xdev->repr_ports[repr_id];
+	struct xsc_dev_pct_list *pct_list = &repr->def_pct_list;
+
+	memset(&add, 0, sizeof(add));
+	add.key.logical_in_port = logical_in_port & XSC_LOGIC_PORT_MASK;
+	add.mask.logical_in_port = XSC_LOGIC_PORT_MASK;
+	add.action.dst_info = dst_info;
+	add.pct_idx = xsc_dev_pct_idx_alloc();
+	if (add.pct_idx == XSC_DEV_PCT_IDX_INVALID)
+		return -1;
+
+	ret = xsc_dev_np_exec(xdev, &add, sizeof(add), XSC_NP_PCT_V4, XSC_NP_OP_ADD);
+	if (unlikely(ret != 0)) {
+		xsc_dev_pct_idx_free(add.pct_idx);
+		return -1;
+	}
+
+	/* Track the rule; on failure undo the hardware entry so no rule is
+	 * left installed that xsc_dev_clear_pct() cannot find (the original
+	 * ignored the -ENOMEM return and leaked the rule from the list).
+	 */
+	ret = xsc_dev_pct_entry_insert(pct_list, add.key.logical_in_port, add.pct_idx);
+	if (unlikely(ret != 0)) {
+		xsc_dev_destroy_pct(xdev, add.key.logical_in_port, add.pct_idx);
+		xsc_dev_pct_idx_free(add.pct_idx);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Remove PCT rule @pct_idx matching @logical_in_port from hardware.
+ * List bookkeeping is the caller's job (see xsc_dev_pct_entry_remove).
+ * Returns the mailbox status (0 on success).
+ */
+int
+xsc_dev_destroy_pct(struct xsc_dev *xdev, uint16_t logical_in_port, uint32_t pct_idx)
+{
+	struct xsc_np_pct_v4_del del;
+
+	memset(&del, 0, sizeof(del));
+	del.key.logical_in_port = logical_in_port & XSC_LOGIC_PORT_MASK;
+	del.mask.logical_in_port = XSC_LOGIC_PORT_MASK;
+	del.pct_idx = pct_idx;
+	return xsc_dev_np_exec(xdev, &del, sizeof(del), XSC_NP_PCT_V4, XSC_NP_OP_DEL);
+}
+
+/* Tear down every default PCT rule recorded for representor @repr_id and
+ * release the associated indexes.  No-op for XSC_DEV_REPR_ID_INVALID.
+ * Hardware delete errors are ignored: this is best-effort teardown.
+ */
+void
+xsc_dev_clear_pct(struct xsc_dev *xdev, int repr_id)
+{
+	struct xsc_repr_port *repr;
+	struct xsc_dev_pct_entry *pct_entry;
+	struct xsc_dev_pct_list *pct_list;
+
+	if (repr_id == XSC_DEV_REPR_ID_INVALID)
+		return;
+
+	repr = &xdev->repr_ports[repr_id];
+	pct_list = &repr->def_pct_list;
+
+	while ((pct_entry = xsc_dev_pct_first_get(pct_list)) != NULL) {
+		xsc_dev_destroy_pct(xdev, pct_entry->logic_port, pct_entry->pct_idx);
+		xsc_dev_pct_entry_remove(pct_entry);
+	}
+}
+
+/* Add an IPAT entry mapping @logic_in_port to @dst_info and mark it valid.
+ * Returns the mailbox status (0 on success).
+ */
+int
+xsc_dev_create_ipat(struct xsc_dev *xdev, uint16_t logic_in_port, uint16_t dst_info)
+{
+	struct xsc_np_ipat add;
+
+	memset(&add, 0, sizeof(add));
+	add.key.logical_in_port = logic_in_port;
+	add.action.dst_info = dst_info;
+	add.action.vld = 1;
+	return xsc_dev_np_exec(xdev, &add, sizeof(add), XSC_NP_IPAT, XSC_NP_OP_ADD);
+}
+
+/* Query the valid bit of the IPAT entry for @logic_in_port.
+ *
+ * NOTE(review): xsc_dev_np_exec() does not copy the reply back into
+ * @get, so this returns the zero-initialized local's vld (always 0)
+ * regardless of the hardware state -- confirm the intended behavior.
+ */
+int
+xsc_dev_get_ipat_vld(struct xsc_dev *xdev, uint16_t logic_in_port)
+{
+	int ret;
+	struct xsc_np_ipat get;
+
+	memset(&get, 0, sizeof(get));
+	get.key.logical_in_port = logic_in_port;
+
+	ret = xsc_dev_np_exec(xdev, &get, sizeof(get), XSC_NP_IPAT, XSC_NP_OP_GET);
+	if (ret != 0)
+		PMD_DRV_LOG(ERR, "Get ipat vld failed, logic in port=%u", logic_in_port);
+
+	return get.action.vld;
+}
+
+/* Delete the IPAT entry keyed by @logic_in_port.
+ * Returns the mailbox status (0 on success).
+ */
+int
+xsc_dev_destroy_ipat(struct xsc_dev *xdev, uint16_t logic_in_port)
+{
+	struct xsc_ipat_key del;
+
+	memset(&del, 0, sizeof(del));
+	del.logical_in_port = logic_in_port;
+	return xsc_dev_np_exec(xdev, &del, sizeof(del), XSC_NP_IPAT, XSC_NP_OP_DEL);
+}
+
+/* Add an EPAT entry for @dst_info: destination port, RX queue range
+ * (@qpn_ofst .. @qpn_ofst + @qp_num - 1) and Toeplitz RSS configuration.
+ * Assumes @qp_num >= 1: action.qp_num stores qp_num - 1 and a zero
+ * argument would wrap to 255 -- TODO confirm callers guarantee this.
+ * Returns the mailbox status (0 on success).
+ */
+int
+xsc_dev_create_epat(struct xsc_dev *xdev, uint16_t dst_info, uint8_t dst_port,
+		    uint16_t qpn_ofst, uint8_t qp_num, struct rte_eth_rss_conf *rss_conf)
+{
+	struct xsc_np_epat_add add;
+
+	memset(&add, 0, sizeof(add));
+	add.key.dst_info = dst_info;
+	add.action.dst_port = dst_port;
+	add.action.vld = 1;
+	add.action.rx_qp_id_ofst = qpn_ofst;
+	add.action.qp_num = qp_num - 1;
+	add.action.rss_en = 1;
+	add.action.rss_hash_func = XSC_RSS_HASH_FUNC_TOPELIZ;
+	add.action.rss_hash_template = xsc_rss_hash_template_get(rss_conf);
+
+	return xsc_dev_np_exec(xdev, &add, sizeof(add), XSC_NP_EPAT, XSC_NP_OP_ADD);
+}
+
+/* Modify the EPAT entry for a VF's @dst_info: queue range and RSS setup.
+ * mod.flags lists the action fields to apply.
+ *
+ * NOTE(review): XSC_EPAT_HAS_PPH_FLAG is set but no corresponding pph
+ * field is written into mod.action -- confirm this is intentional.
+ * Assumes @qp_num >= 1 (qp_num - 1 is stored, 0 would wrap to 255).
+ */
+int
+xsc_dev_vf_modify_epat(struct xsc_dev *xdev, uint16_t dst_info, uint16_t qpn_ofst,
+		       uint8_t qp_num, struct rte_eth_rss_conf *rss_conf)
+{
+	struct xsc_np_epat_mod mod;
+
+	memset(&mod, 0, sizeof(mod));
+	mod.flags = XSC_EPAT_VLD_FLAG | XSC_EPAT_RX_QP_ID_OFST_FLAG |
+		    XSC_EPAT_QP_NUM_FLAG | XSC_EPAT_HAS_PPH_FLAG |
+		    XSC_EPAT_RSS_EN_FLAG | XSC_EPAT_RSS_HASH_TEMPLATE_FLAG |
+		    XSC_EPAT_RSS_HASH_FUNC_FLAG;
+
+	mod.key.dst_info = dst_info;
+	mod.action.vld = 1;
+	mod.action.rx_qp_id_ofst = qpn_ofst;
+	mod.action.qp_num = qp_num - 1;
+	mod.action.rss_en = 1;
+	mod.action.rss_hash_func = XSC_RSS_HASH_FUNC_TOPELIZ;
+	mod.action.rss_hash_template = xsc_rss_hash_template_get(rss_conf);
+
+	return xsc_dev_np_exec(xdev, &mod, sizeof(mod), XSC_NP_EPAT, XSC_NP_OP_MOD);
+}
+
+/* Bind TX queue pair @txqpn (absolute; rebased on hwinfo.raw_qp_id_base)
+ * to packet-group set id @qp_set_id.  Returns the mailbox status.
+ */
+int
+xsc_dev_set_qpsetid(struct xsc_dev *xdev, uint32_t txqpn, uint16_t qp_set_id)
+{
+	int ret;
+	struct xsc_pg_set_id add;
+	uint16_t qp_id_base = xdev->hwinfo.raw_qp_id_base;
+
+	memset(&add, 0, sizeof(add));
+	add.key.qp_id = txqpn - qp_id_base;
+	add.action.qp_set_id = qp_set_id;
+
+	ret = xsc_dev_np_exec(xdev, &add, sizeof(add), XSC_NP_PG_QP_SET_ID, XSC_NP_OP_ADD);
+	if (ret != 0)
+		PMD_DRV_LOG(ERR, "Failed to set qp %u setid %u", txqpn, qp_set_id);
+
+	return ret;
+}
+
+/* Delete the EPAT entry keyed by @dst_info.
+ * Returns the mailbox status (0 on success).
+ */
+int
+xsc_dev_destroy_epat(struct xsc_dev *xdev, uint16_t dst_info)
+{
+	struct xsc_epat_key del;
+
+	memset(&del, 0, sizeof(del));
+
+	del.dst_info = dst_info;
+	return xsc_dev_np_exec(xdev, &del, sizeof(del), XSC_NP_EPAT, XSC_NP_OP_DEL);
+}
+
+/* Program the VF offset table: map the device's VF representor offset to
+ * its base logical input port.  Returns the mailbox status (0 on success).
+ */
+int
+xsc_dev_create_vfos_baselp(struct xsc_dev *xdev)
+{
+	int ret;
+	struct xsc_np_vfso add;
+
+	memset(&add, 0, sizeof(add));
+	add.key.src_port = xdev->vfrep_offset;
+	add.action.ofst = xdev->vfos_logical_in_port;
+
+	ret = xsc_dev_np_exec(xdev, &add, sizeof(add), XSC_NP_VFOS, XSC_NP_OP_ADD);
+	if (ret != 0)
+		PMD_DRV_LOG(ERR, "Failed to set vfos, port=%u, offset=%u",
+			    add.key.src_port, add.action.ofst);
+
+	return ret;
+}
+
+/* Release the board-global PCT index allocator.
+ * The manager state is reset so a later xsc_dev_pct_init() actually
+ * re-initializes instead of seeing a stale bmp_mem pointer and returning
+ * success with dangling pointers; this also makes a repeated uninit a
+ * harmless no-op (rte_bitmap_free/rte_free accept NULL).
+ */
+void
+xsc_dev_pct_uninit(void)
+{
+	rte_bitmap_free(xsc_pct_mgr.bmp_pct);
+	rte_free(xsc_pct_mgr.bmp_mem);
+	xsc_pct_mgr.bmp_pct = NULL;
+	xsc_pct_mgr.bmp_mem = NULL;
+}
+
+/* Initialize the board-global PCT index allocator: a bitmap covering
+ * [XSC_DEV_DEF_PCT_IDX_MIN, XSC_DEV_DEF_PCT_IDX_MAX], every index marked
+ * available (set bit = free).  Idempotent: returns 0 immediately if the
+ * allocator already exists.  Returns 0 on success, negative errno on
+ * failure (partial state is cleaned up via xsc_dev_pct_uninit()).
+ */
+int
+xsc_dev_pct_init(void)
+{
+	int ret;
+	uint8_t *bmp_mem;
+	uint32_t pos, pct_sz, bmp_sz;
+
+	if (xsc_pct_mgr.bmp_mem != NULL)
+		return 0;
+
+	pct_sz = XSC_DEV_DEF_PCT_IDX_MAX - XSC_DEV_DEF_PCT_IDX_MIN + 1;
+	bmp_sz = rte_bitmap_get_memory_footprint(pct_sz);
+	bmp_mem = rte_zmalloc(NULL, bmp_sz, RTE_CACHE_LINE_SIZE);
+	if (bmp_mem == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc pct bitmap memory");
+		ret = -ENOMEM;
+		goto pct_init_fail;
+	}
+
+	xsc_pct_mgr.bmp_mem = bmp_mem;
+	xsc_pct_mgr.bmp_pct = rte_bitmap_init(pct_sz, bmp_mem, bmp_sz);
+	if (xsc_pct_mgr.bmp_pct == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to init pct bitmap");
+		ret = -EINVAL;
+		goto pct_init_fail;
+	}
+
+	/* Mark all pct bitmap available */
+	for (pos = 0; pos < pct_sz; pos++)
+		rte_bitmap_set(xsc_pct_mgr.bmp_pct, pos);
+
+	return 0;
+
+pct_init_fail:
+	xsc_dev_pct_uninit();
+	return ret;
+}
+
+/* Allocate a free PCT index, or XSC_DEV_PCT_IDX_INVALID when exhausted.
+ * rte_bitmap_scan() returns nonzero on success with @pos at the slab
+ * base, so the offset of the first set bit within the slab is added.
+ */
+uint32_t
+xsc_dev_pct_idx_alloc(void)
+{
+	int ret;
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	ret = rte_bitmap_scan(xsc_pct_mgr.bmp_pct, &pos, &slab);
+	if (ret != 0) {
+		pos += rte_bsf64(slab);
+		/* Clearing the bit marks the index as in use. */
+		rte_bitmap_clear(xsc_pct_mgr.bmp_pct, pos);
+		return (pos + XSC_DEV_DEF_PCT_IDX_MIN);
+	}
+
+	PMD_DRV_LOG(ERR, "Failed to alloc xsc pct idx");
+	return XSC_DEV_PCT_IDX_INVALID;
+}
+
+/* Return @pct_idx to the allocator.  Assumes it came from
+ * xsc_dev_pct_idx_alloc() (no range check is performed).
+ */
+void
+xsc_dev_pct_idx_free(uint32_t pct_idx)
+{
+	rte_bitmap_set(xsc_pct_mgr.bmp_pct, pct_idx - XSC_DEV_DEF_PCT_IDX_MIN);
+}
+
+/* Allocate a (logic_port, pct_idx) record and link it at the head of
+ * @pct_list.  Returns 0 on success, -ENOMEM on allocation failure.
+ */
+int
+xsc_dev_pct_entry_insert(struct xsc_dev_pct_list *pct_list,
+			 uint32_t logic_port, uint32_t pct_idx)
+{
+	struct xsc_dev_pct_entry *pct_entry;
+
+	pct_entry = rte_zmalloc(NULL, sizeof(struct xsc_dev_pct_entry), RTE_CACHE_LINE_SIZE);
+	if (pct_entry == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc pct entry memory");
+		return -ENOMEM;
+	}
+
+	pct_entry->logic_port = logic_port;
+	pct_entry->pct_idx = pct_idx;
+	LIST_INSERT_HEAD(pct_list, pct_entry, next);
+
+	return 0;
+}
+
+/* Peek the head of @pct_list; NULL when the list is empty. */
+struct xsc_dev_pct_entry *
+xsc_dev_pct_first_get(struct xsc_dev_pct_list *pct_list)
+{
+	return LIST_FIRST(pct_list);
+}
+
+/* Unlink @pct_entry from its list, return its PCT index to the allocator
+ * and free the record.  Returns 0, or -1 if @pct_entry is NULL.
+ */
+int
+xsc_dev_pct_entry_remove(struct xsc_dev_pct_entry *pct_entry)
+{
+	if (pct_entry == NULL)
+		return -1;
+
+	xsc_dev_pct_idx_free(pct_entry->pct_idx);
+	LIST_REMOVE(pct_entry, next);
+	rte_free(pct_entry);
+
+	return 0;
+}
diff --git a/drivers/net/xsc/xsc_np.h b/drivers/net/xsc/xsc_np.h
new file mode 100644
index 0000000000..3ceaf93ae4
--- /dev/null
+++ b/drivers/net/xsc/xsc_np.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_NP_H_
+#define _XSC_NP_H_
+
+#include <rte_byteorder.h>
+#include <rte_ethdev.h>
+
+struct xsc_dev;
+
+/* IPAT key: 11-bit logical input port. */
+struct xsc_ipat_key {
+	uint16_t logical_in_port:11;
+	uint16_t rsv:5;
+} __rte_packed;
+
+/* IPAT action.  NOTE(review): these packed bitfield layouts mirror NP
+ * hardware tables; bitfield ordering is toolchain-dependent -- assumed
+ * consistent with the supported compilers.
+ */
+struct xsc_ipat_action {
+	uint64_t rsv0;
+	uint64_t rsv1:9;
+	uint64_t dst_info:11;
+	uint64_t rsv2:34;
+	uint64_t vld:1;		/* entry valid */
+	uint64_t rsv:1;
+} __rte_packed;
+
+/* IPAT add/get payload: key followed by action. */
+struct xsc_np_ipat {
+	struct xsc_ipat_key key;
+	struct xsc_ipat_action action;
+};
+
+/* EPAT key: 11-bit destination info. */
+struct xsc_epat_key {
+	uint16_t dst_info:11;
+	uint16_t rsv:5;
+} __rte_packed;
+
+/* EPAT action: destination port, RX queue range and RSS setup.
+ * qp_num holds the queue count minus one (see xsc_dev_create_epat).
+ */
+struct xsc_epat_action {
+	uint8_t rsv0[14];
+	uint8_t rsv1:4;
+	uint8_t dst_port:4;
+	uint8_t rss_hash_func:2;
+	uint8_t rss_hash_template:5;
+	uint8_t rss_en:1;
+	uint8_t qp_num;
+	uint16_t rx_qp_id_ofst:12;
+	uint16_t rsv3:4;
+	uint8_t rsv4:7;
+	uint8_t vld:1;		/* entry valid */
+} __rte_packed;
+
+/* EPAT add payload. */
+struct xsc_np_epat_add {
+	struct xsc_epat_key key;
+	struct xsc_epat_action action;
+};
+
+/* EPAT modify payload; flags (XSC_EPAT_*_FLAG) select the fields
+ * of action to apply.
+ */
+struct xsc_np_epat_mod {
+	uint64_t flags;
+	struct xsc_epat_key key;
+	struct xsc_epat_action action;
+};
+
+/* PCT v4 key: 11-bit logical input port (the rest is reserved). */
+struct xsc_pct_v4_key {
+	uint16_t rsv0[20];
+	uint32_t rsv1:13;
+	uint32_t logical_in_port:11;
+	uint32_t rsv2:8;
+} __rte_packed;
+
+/* PCT action: 11-bit destination info. */
+struct xsc_pct_action {
+	uint64_t rsv0:29;
+	uint64_t dst_info:11;
+	uint64_t rsv1:8;
+} __rte_packed;
+
+/* PCT add payload: key, mask, action and the table index to use. */
+struct xsc_np_pct_v4_add {
+	struct xsc_pct_v4_key key;
+	struct xsc_pct_v4_key mask;
+	struct xsc_pct_action action;
+	uint32_t pct_idx;
+};
+
+/* PCT delete payload: key, mask and the table index to remove. */
+struct xsc_np_pct_v4_del {
+	struct xsc_pct_v4_key key;
+	struct xsc_pct_v4_key mask;
+	uint32_t pct_idx;
+};
+
+/* Packet-group table key: QP id relative to hwinfo.raw_qp_id_base. */
+struct xsc_pg_qp_set_id_key {
+	uint16_t qp_id:13;
+	uint16_t rsv:3;
+} __rte_packed;
+
+/* Packet-group table action: the set id bound to the QP. */
+struct xsc_pg_qp_set_id_action {
+	uint16_t qp_set_id:9;
+	uint16_t rsv:7;
+} __rte_packed;
+
+/* PG_QP_SET_ID add payload. */
+struct xsc_pg_set_id {
+	struct xsc_pg_qp_set_id_key key;
+	struct xsc_pg_qp_set_id_action action;
+};
+
+/* VFOS key: 11-bit source port (VF representor offset). */
+struct xsc_vfos_key {
+	uint16_t src_port:11;
+	uint16_t rsv:5;
+} __rte_packed;
+
+/* VFOS action: base logical input port offset for the key's port. */
+struct xsc_vfos_start_ofst_action {
+	uint16_t ofst:11;
+	uint16_t rsv:5;
+} __rte_packed;
+
+/* VFOS add payload. */
+struct xsc_np_vfso {
+	struct xsc_vfos_key key;
+	struct xsc_vfos_start_ofst_action action;
+};
+
+/* Board-global PCT index allocator state. */
+struct xsc_dev_pct_mgr {
+	uint8_t *bmp_mem;		/* backing memory for bmp_pct */
+	struct rte_bitmap *bmp_pct;	/* set bit = index available */
+};
+
+/* One installed default PCT rule, linked on a representor's list. */
+struct xsc_dev_pct_entry {
+	LIST_ENTRY(xsc_dev_pct_entry) next;
+	uint32_t logic_port;
+	uint32_t pct_idx;
+};
+
+LIST_HEAD(xsc_dev_pct_list, xsc_dev_pct_entry);
+
+/* NP table manipulation (0 on success; create_pct returns -1 on failure). */
+int xsc_dev_create_pct(struct xsc_dev *xdev, int repr_id,
+		       uint16_t logical_in_port, uint16_t dst_info);
+int xsc_dev_destroy_pct(struct xsc_dev *xdev, uint16_t logical_in_port, uint32_t pct_idx);
+void xsc_dev_clear_pct(struct xsc_dev *xdev, int repr_id);
+int xsc_dev_create_ipat(struct xsc_dev *xdev, uint16_t logic_in_port, uint16_t dst_info);
+int xsc_dev_get_ipat_vld(struct xsc_dev *xdev, uint16_t logic_in_port);
+int xsc_dev_destroy_ipat(struct xsc_dev *xdev, uint16_t logic_in_port);
+int xsc_dev_create_epat(struct xsc_dev *xdev, uint16_t dst_info, uint8_t dst_port,
+			uint16_t qpn_ofst, uint8_t qp_num, struct rte_eth_rss_conf *rss_conf);
+int xsc_dev_vf_modify_epat(struct xsc_dev *xdev, uint16_t dst_info, uint16_t qpn_ofst,
+			   uint8_t qp_num, struct rte_eth_rss_conf *rss_conf);
+int xsc_dev_destroy_epat(struct xsc_dev *xdev, uint16_t dst_info);
+int xsc_dev_set_qpsetid(struct xsc_dev *xdev, uint32_t txqpn, uint16_t qp_set_id);
+int xsc_dev_create_vfos_baselp(struct xsc_dev *xdev);
+/* Board-global PCT index allocator and per-representor rule tracking. */
+void xsc_dev_pct_uninit(void);
+int xsc_dev_pct_init(void);
+uint32_t xsc_dev_pct_idx_alloc(void);
+void xsc_dev_pct_idx_free(uint32_t pct_idx);
+int xsc_dev_pct_entry_insert(struct xsc_dev_pct_list *pct_list,
+			     uint32_t logic_port, uint32_t pct_idx);
+struct xsc_dev_pct_entry *xsc_dev_pct_first_get(struct xsc_dev_pct_list *pct_list);
+int xsc_dev_pct_entry_remove(struct xsc_dev_pct_entry *pct_entry);
+
+#endif /* _XSC_NP_H_ */
--
2.25.1
next prev parent reply other threads:[~2025-01-03 15:05 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-01-03 15:04 [PATCH v4 00/15] XSC PMD for Yunsilicon NICs WanRenyong
2025-01-03 15:04 ` [PATCH v4 01/15] net/xsc: add xsc PMD framework WanRenyong
2025-01-03 19:00 ` Stephen Hemminger
2025-01-06 1:36 ` WanRenyong
2025-01-03 15:04 ` [PATCH v4 02/15] net/xsc: add xsc device initialization WanRenyong
2025-01-03 18:58 ` Stephen Hemminger
2025-01-06 3:29 ` WanRenyong
2025-01-03 15:04 ` [PATCH v4 03/15] net/xsc: add xsc mailbox WanRenyong
2025-01-03 15:04 ` [PATCH v4 04/15] net/xsc: add xsc dev ops to support VFIO driver WanRenyong
2025-01-03 19:02 ` Stephen Hemminger
2025-01-06 1:53 ` WanRenyong
2025-01-03 19:04 ` Stephen Hemminger
2025-01-06 2:01 ` WanRenyong
2025-01-03 19:06 ` Stephen Hemminger
2025-01-06 2:02 ` WanRenyong
2025-01-03 15:04 ` WanRenyong [this message]
2025-01-03 15:04 ` [PATCH v4 06/15] net/xsc: initialize xsc representors WanRenyong
2025-01-03 15:04 ` [PATCH v4 07/15] net/xsc: add ethdev configure and RSS ops WanRenyong
2025-01-03 19:14 ` Stephen Hemminger
2025-01-06 2:20 ` WanRenyong
2025-01-03 15:04 ` [PATCH v4 08/15] net/xsc: add Rx and Tx queue setup WanRenyong
2025-01-03 15:04 ` [PATCH v4 09/15] net/xsc: add ethdev start WanRenyong
2025-01-03 19:17 ` Stephen Hemminger
2025-01-06 3:01 ` WanRenyong
2025-01-03 15:04 ` [PATCH v4 10/15] net/xsc: add ethdev stop and close WanRenyong
2025-01-03 15:04 ` [PATCH v4 11/15] net/xsc: add ethdev Rx burst WanRenyong
2025-01-03 15:04 ` [PATCH v4 12/15] net/xsc: add ethdev Tx burst WanRenyong
2025-01-03 15:04 ` [PATCH v4 13/15] net/xsc: add basic stats ops WanRenyong
2025-01-03 15:04 ` [PATCH v4 14/15] net/xsc: add ethdev infos get WanRenyong
2025-01-03 19:22 ` Stephen Hemminger
2025-01-06 4:03 ` WanRenyong
2025-01-03 15:04 ` [PATCH v4 15/15] net/xsc: add ethdev link and MTU ops WanRenyong
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250103150413.1529663-6-wanry@yunsilicon.com \
--to=wanry@yunsilicon.com \
--cc=andrew.rybchenko@oktetlabs.ru \
--cc=dev@dpdk.org \
--cc=ferruh.yigit@amd.com \
--cc=jacky@yunsilicon.com \
--cc=nana@yunsilicon.com \
--cc=qianr@yunsilicon.com \
--cc=thomas@monjalon.net \
--cc=weihg@yunsilicon.com \
--cc=xudw@yunsilicon.com \
--cc=zhangxx@yunsilicon.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).