From: Yuying Zhang <yuying.zhang@intel.com>
To: dev@dpdk.org, qi.z.zhang@intel.com, jingjing.wu@intel.com,
beilei.xing@intel.com
Cc: Wenjing Qiao <wenjing.qiao@intel.com>
Subject: [PATCH v2 3/8] net/cpfl: add FXP low level implementation
Date: Fri, 1 Sep 2023 11:31:53 +0000 [thread overview]
Message-ID: <20230901113158.1654044-4-yuying.zhang@intel.com> (raw)
In-Reply-To: <20230901113158.1654044-1-yuying.zhang@intel.com>
From: Wenjing Qiao <wenjing.qiao@intel.com>
Add the FXP low-level implementation used by the CPFL rte_flow
support to create and delete rules: action set encoding helpers
(cpfl_actions.h), configuration control queue setup and teardown
(cpfl_controlq.c/h), and rule descriptor/blob preparation
(cpfl_rules.c/h).
Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
drivers/net/cpfl/cpfl_actions.h | 858 +++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_controlq.c | 379 ++++++++++++++
drivers/net/cpfl/cpfl_controlq.h | 51 ++
drivers/net/cpfl/cpfl_rules.c | 126 +++++
drivers/net/cpfl/cpfl_rules.h | 306 +++++++++++
drivers/net/cpfl/meson.build | 2 +
6 files changed, 1722 insertions(+)
create mode 100644 drivers/net/cpfl/cpfl_actions.h
create mode 100644 drivers/net/cpfl/cpfl_controlq.c
create mode 100644 drivers/net/cpfl/cpfl_controlq.h
create mode 100644 drivers/net/cpfl/cpfl_rules.c
create mode 100644 drivers/net/cpfl/cpfl_rules.h
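For reviewers, a minimal usage sketch (not part of the patch) of how the pieces below are expected to be combined by the fxp rule module added later in this series. The function name, the zeroed cookie/port/host fields and the pre-filled DMA payload are placeholder assumptions, not APIs introduced here:

#include "cpfl_rules.h"

/* Illustrative only: add one SEM rule. 'payload' is assumed to be a
 * CP-visible DMA buffer large enough for a cpfl_rule_cfg_pkt_record,
 * and vsi_id is assumed to be < CPFL_ACT_16B_FWD_VSI_CNT.
 */
static void
example_add_sem_rule(uint16_t vsi_id, const uint8_t *key, uint8_t key_len,
		     uint16_t cfg_ctrl, struct idpf_dma_mem *payload,
		     struct idpf_ctlq_msg *msg)
{
	union cpfl_rule_cfg_pkt_record *blob = payload->va;
	struct cpfl_rule_cfg_data cfg = { 0 };
	union cpfl_action_set acts[2];

	/* 1. Encode the hardware actions (cpfl_actions.h). */
	acts[0] = cpfl_act_fwd_vsi(0, 1, CPFL_PE_LAN, vsi_id);
	acts[1] = cpfl_act_set_md8(0, 1, 0, 0, 0x5a, 0xff);

	/* 2. Pack key + actions into the SEM rule blob (cpfl_rules.c). */
	cpfl_prep_sem_rule_blob(key, key_len, (const uint8_t *)acts,
				sizeof(acts), cfg_ctrl, blob);

	/* 3. Build the control queue descriptor carrying the blob. */
	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_sem_add_rule,
				       0 /* cookie */, vsi_id, 0 /* port */,
				       0 /* host */, 0, 0, 0, 1 /* resp_req */,
				       sizeof(*blob), payload, &cfg.common);
	cpfl_prep_rule_desc(&cfg, msg);

	/* 4. 'msg' is then posted on a config queue set up with
	 *    cpfl_vport_ctlq_add() (cpfl_controlq.c).
	 */
}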
diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+ uint32_t data;
+
+ struct {
+ uint32_t val : 24;
+ uint32_t idx : 4;
+ uint32_t tag : 1;
+ uint32_t prec : 3;
+ } set_24b_a;
+
+ struct {
+ uint32_t val : 24;
+ uint32_t idx : 3;
+ uint32_t tag : 2;
+ uint32_t prec : 3;
+ } set_24b_b;
+
+ struct {
+ uint32_t val : 16;
+ uint32_t idx : 4;
+ uint32_t unused : 6;
+ uint32_t tag : 3;
+ uint32_t prec : 3;
+ } set_16b;
+
+ struct {
+ uint32_t val_a : 8;
+ uint32_t val_b : 8;
+ uint32_t idx_a : 4;
+ uint32_t idx_b : 4;
+ uint32_t tag : 5;
+ uint32_t prec : 3;
+ } set_8b;
+
+ struct {
+ uint32_t val : 10;
+ uint32_t ena : 10;
+ uint32_t idx : 4;
+ uint32_t tag : 5;
+ uint32_t prec : 3;
+ } set_1b;
+
+ struct {
+ uint32_t val : 24;
+ uint32_t tag : 5;
+ uint32_t prec : 3;
+ } nop;
+
+ struct {
+ uint32_t val : 24;
+ uint32_t tag : 5;
+ uint32_t prec : 3;
+ } chained_24b;
+
+ struct {
+ uint32_t val : 24;
+ uint32_t tag : 5;
+ uint32_t prec : 3;
+ } aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+ union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+ union cpfl_action_set act;
+
+ act.data = 0;
+ return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+ return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s) ((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX 7
+#define CPFL_ACT_PREC_S 29
+#define CPFL_ACT_PREC_M CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p) \
+ (((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p) ((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT 32 /* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ 128 /* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S 24
+#define CPFL_ACT_1B_OP_M CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP ((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S 0
+#define CPFL_ACT_1B_VAL_M CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S 10
+#define CPFL_ACT_1B_EN_M CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S 20
+#define CPFL_ACT_1B_INDEX_M CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently use only an INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+ ((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+ ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+ (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+ (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+ CPFL_ACT_1B_OP_DROP = 0x01,
+ CPFL_ACT_1B_OP_HDR_SPLIT = 0x02,
+ CPFL_ACT_1B_OP_DIR_CHANGE = 0x04,
+ CPFL_ACT_1B_OP_DEFER_DROP = 0x08,
+ CPFL_ACT_1B_OP_ORIG_MIR_MD = 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S 4
+#define CPFL_ACT_1B_COMMIT_MODE_M \
+ CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+ /* Action processing for the initial classification pass */
+ CPFL_ACT_COMMIT_ALL = 0, /* Commit all actions */
+ CPFL_ACT_COMMIT_PRE_MOD = 1, /* Commit only pre-modify actions*/
+ CPFL_ACT_COMMIT_NONE = 2, /* Commit no action */
+ /* Action processing for deferred actions in a recirculation pass */
+ CPFL_ACT_COMMIT_RECIR_ALL = 4, /* Commit all actions */
+ CPFL_ACT_COMMIT_RECIR_PRE_MOD = 5, /* Commit only pre-modify actions*/
+ CPFL_ACT_COMMIT_RECIR_NONE = 6 /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S 24
+#define CPFL_ACT_OP_8B_M CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B ((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S 0
+#define CPFL_ACT_8B_A_VAL_M CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S 16
+#define CPFL_ACT_8B_A_INDEX_M CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S 8
+#define CPFL_ACT_8B_B_VAL_M CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S 20
+#define CPFL_ACT_8B_B_INDEX_M CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless combining two 8-bit actions into an action set, both A and B fields
+ * must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+ ((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+ (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+ (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+ (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+ (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META 9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT 16
+#define CPFL_ACT_8B_MOD_META_VALID 0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S 26
+#define CPFL_ACT_OP_16B_M CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B ((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S 16
+#define CPFL_ACT_16B_INDEX_M CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S 0
+#define CPFL_ACT_16B_VAL_M CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+ ((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+ (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+ (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET 0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX 1
+#define CPFL_ACT_16B_INDEX_SET_VSI 2
+#define CPFL_ACT_16B_INDEX_DEL_MD 4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST 5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT 2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS 2
+#define CPFL_ACT_16B_FWD_VSI_CNT 1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT 256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT 1024
+#define CPFL_ACT_16B_FWD_PORT_CNT 4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT 32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS 4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID ((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S 0
+#define CPFL_ACT_16B_SET_VSI_VAL_M \
+ CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S 11
+#define CPFL_ACT_16B_SET_VSI_PE_M \
+ CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S 14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M \
+ CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S 0
+#define CPFL_ACT_16B_DEL_MD_1_S 5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S 0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M \
+ CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S 14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M \
+ CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+ ((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+ CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+ (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+ CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+ ((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+ CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+ (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+ CPFL_ACT_16B_SET_VSI_PE_M) | \
+ (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+ CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+ CPFL_PE_LAN = 0,
+ CPFL_PE_RDMA,
+ CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+ CPFL_ACT_FWD_VSI,
+ CPFL_ACT_FWD_VSI_LIST,
+ CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S 28
+#define CPFL_ACT_OP_24B_A_M CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S 24
+#define CPFL_ACT_24B_A_INDEX_M CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S 0
+#define CPFL_ACT_24B_A_VAL_M CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A ((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+ ((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+ (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+ (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR 0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST 1
+#define CPFL_ACT_24B_INDEX_COUNT 2
+#define CPFL_ACT_24B_INDEX_SET_Q 8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE 9
+#define CPFL_ACT_24B_INDEX_METER 10
+
+#define CPFL_ACT_24B_COUNT_SLOTS 6
+#define CPFL_ACT_24B_METER_SLOTS 6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT (16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT ((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT (12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS 3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S 0
+#define CPFL_ACT_24B_SET_Q_Q_M \
+ CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S 14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M \
+ CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S 21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M \
+ CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+ CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+ CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+ CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S 0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M \
+ CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S 12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M \
+ CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S 14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M \
+ CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS ((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND ((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID ((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES 4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT 2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S 0
+#define CPFL_ACT_24B_METER_INDEX_M \
+ CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S 20
+#define CPFL_ACT_24B_METER_BANK_M \
+ CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID ((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT 6
+#define CPFL_ACT_24B_METER_INDEX_CNT ((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S 27
+#define CPFL_ACT_OP_24B_B_M CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S 24
+#define CPFL_ACT_24B_B_INDEX_M \
+ CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S 0
+#define CPFL_ACT_24B_B_VAL_M CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B ((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+ ((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+ (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+ (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD 0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK 6
+#define CPFL_ACT_24B_SET_MD_SLOTS 6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S 0
+#define CPFL_ACT_24B_SET_MD8_VAL_M \
+ CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S 8
+#define CPFL_ACT_24B_SET_MD8_MASK_M \
+ CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S 16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M \
+ CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S 20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M \
+ CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S 0
+#define CPFL_ACT_24B_SET_MD16_VAL_M \
+ CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S 16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M \
+ CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR 8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M 0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S 16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M \
+ CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S 20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M \
+ CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16 ((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX 15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX 7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX 15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX 7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+ CPFL_RULE_ACT_RC_1_RANGE = 0,
+ CPFL_RULE_ACT_RC_2_RANGES = 1,
+ CPFL_RULE_ACT_RC_4_RANGES = 2,
+ CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S 0
+#define CPFL_ACT_24B_RC_TBL_IDX_M \
+ CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S 13
+#define CPFL_ACT_24B_RC_START_BANK_M \
+ CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S 16
+#define CPFL_ACT_24B_RC_MODE_M \
+ CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S 18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M \
+ CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT (8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT 8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT 64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: HAS is being updated. Revise the order of chained and base action
+ * when the HAS has finalized it.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets. The first one is
+ * the chained AUX action set. The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structure with IDs from
+ * 0 to 31 while the non-chained SET_MD variants can only update existing meta-
+ * data IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S 8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M \
+ CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD ((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S 16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M \
+ CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S 0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M \
+ CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S 0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M \
+ CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR 24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M 0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S 29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M \
+ CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA ((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S 21
+#define CPFL_ACT_24B_SET_MD_OP_8B ((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B ((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B ((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+ (CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+ (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+ CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+ (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+ CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+ (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+ CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ * - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ * - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ * - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped. Recirculation is canceled.
+ * - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ * dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+ union cpfl_action_set a;
+
+ if (!CPFL_ACT_PREC_CHECK(prec))
+ return cpfl_act_nop();
+ a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+ return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+ union cpfl_action_set a;
+
+ if (!CPFL_ACT_PREC_CHECK(prec))
+ return cpfl_act_nop();
+ a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+ (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+ return a;
+}
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+ union cpfl_action_set a;
+
+ if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+ return cpfl_act_nop();
+
+ a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+ CPFL_ACT_8B_MOD_META_VALID | prof);
+
+ return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots. The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+ union cpfl_action_set a;
+ uint32_t val;
+
+ if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+ vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+ return cpfl_act_nop();
+
+ val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+ a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+ val);
+
+ return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots. The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+ union cpfl_action_set a;
+ uint32_t val;
+
+ if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+ port >= CPFL_ACT_16B_FWD_PORT_CNT)
+ return cpfl_act_nop();
+
+ val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+ a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+ val);
+
+ return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses. Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry, and require an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+ union cpfl_action_set a;
+
+ if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+ return cpfl_act_nop();
+
+ a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+ mod_addr);
+
+ return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+ bool no_implicit_vsi)
+{
+ union cpfl_action_set a;
+ uint32_t val;
+
+ if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+ return cpfl_act_nop();
+
+ val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+ (((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+ CPFL_ACT_24B_SET_Q_DST_PE_M);
+ if (no_implicit_vsi)
+ val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+ a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+ return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+ uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+ union cpfl_action_set a;
+ uint32_t val;
+
+ if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+ q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+ return cpfl_act_nop();
+
+ val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+ ((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+ (((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+ CPFL_ACT_24B_SET_Q_DST_PE_M);
+ if (no_implicit_vsi)
+ val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+ a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+ return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified. In addition, it provides a hint as to whether
+ * or not an accompanying MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot. If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select one with higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+ bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+ union cpfl_action_set a;
+ uint32_t val;
+
+ if (!CPFL_ACT_PREC_CHECK(prec) ||
+ prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+ ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+ return cpfl_act_nop();
+
+ val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+ (((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+ CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+ (((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+ CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+ ((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+ if (append_act_bus)
+ val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+ if (miss_prepend)
+ val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+ a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+ return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots. If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+ union cpfl_action_set a;
+ uint32_t val;
+
+ if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS ||
+ idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+ bank >= CPFL_ACT_24B_METER_BANK_CNT)
+ return cpfl_act_nop();
+
+ val = CPFL_ACT_24B_METER_VALID |
+ (uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+ (uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+ a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+ val);
+
+ return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots. This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+ union cpfl_action_set a;
+ uint32_t tmp;
+
+ if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+ mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+ off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+ return cpfl_act_nop();
+
+ tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+ ((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+ ((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+ ((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+ a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+ tmp);
+
+ return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots. This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+ union cpfl_action_set a;
+ uint32_t tmp;
+
+ if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+ mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+ word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+ return cpfl_act_nop();
+
+ tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+ ((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+ ((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+ ((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+ a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+ tmp);
+
+ return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots. This action is made up of two
+ * consecutive action sets: the chained (aux) action set comes first and the
+ * base/parent action set second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+ uint8_t off, uint32_t val)
+{
+ if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+ mid >= CPFL_METADATA_ID_CNT ||
+ (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+ ext->acts[0] = cpfl_act_nop();
+ ext->acts[1] = cpfl_act_nop();
+ } else {
+ uint32_t tmp;
+
+ /* Chained action set comes first */
+ tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+ ext->acts[0].data =
+ CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+ mid, off, tmp);
+
+ /* Lower 24 bits of value */
+ tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+ ext->acts[1].data =
+ CPFL_ACT_MAKE_24B_B(prec,
+ CPFL_ACT_24B_INDEX_SET_MD + slot,
+ tmp);
+ }
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
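A hedged sketch of how the chained SET_MD/32 helper above is meant to be consumed: it emits two consecutive action sets (aux first, base second) that must stay adjacent in the caller's action array. The function name, slot/precedence choices and metadata ID below are illustrative assumptions, not part of the patch:

#include "cpfl_actions.h"

/* Illustrative only: write a 32-bit metadata dword plus a hash-queue
 * redirect into a flat action array (e.g. the one later copied into a
 * SEM rule blob). Returns the number of 32-bit action sets written,
 * or -1 if the destination is too small.
 */
static int
example_fill_actions(union cpfl_action_set *dst, int room)
{
	struct cpfl_action_set_ext md32;
	int n = 0;

	if (room < CPFL_ACTION_SET_EXT_CNT + 1)
		return -1;

	/* Chained action: the aux set must immediately precede the base set. */
	cpfl_act_set_md32_ext(&md32, 0 /* slot */, 1 /* prec */,
			      20 /* metadata ID */, 0 /* offset */, 0xdeadbeef);
	dst[n++] = md32.acts[0];
	dst[n++] = md32.acts[1];

	/* Plain 24-bit action: forward to hash queue 3 on the LAN PE. */
	dst[n++] = cpfl_act_set_hash_queue(1 /* prec */, CPFL_PE_LAN, 3, false);

	return n;
}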
diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..476c78f235
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that the DMA parameters of each DMA memory struct are present and
+ * consistent with the control queue parameters.
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+ struct idpf_dma_mem *ring = &qinfo->ring_mem;
+ struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+ if (!ring->va || !ring->size)
+ return -EBADR;
+
+ if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+ return -EINVAL;
+
+ /* no need for buffer checks for TX queues */
+ if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+ qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+ qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+ return 0;
+
+ if (!buf->va || !buf->size)
+ return -EBADR;
+
+ /* accommodate different types of rx ring buffer sizes */
+ if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+ buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+ (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+ buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+ return -EBADR;
+
+ return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly or is inconsistent
+ * with the control queue parameters, this routine frees any locally allocated
+ * buffer tracking structures and returns an error.
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+ struct cpfl_ctlq_create_info *qinfo)
+{
+ int ret_code = 0;
+ unsigned int elem_size;
+ int i = 0;
+
+ ret_code = cpfl_check_dma_mem_parameters(qinfo);
+ if (ret_code)
+ /* TODO: Log an error message per CP */
+ goto err;
+
+ cq->desc_ring.va = qinfo->ring_mem.va;
+ cq->desc_ring.pa = qinfo->ring_mem.pa;
+ cq->desc_ring.size = qinfo->ring_mem.size;
+
+ switch (cq->cq_type) {
+ case IDPF_CTLQ_TYPE_MAILBOX_RX:
+ case IDPF_CTLQ_TYPE_CONFIG_RX:
+ case IDPF_CTLQ_TYPE_EVENT_RX:
+ case IDPF_CTLQ_TYPE_RDMA_RX:
+ /* Only receive queues will have allocated buffers
+		 * during init. The CP allocates one big chunk of DMA
+		 * memory whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block gets broken down into multiple
+		 * smaller blocks that actually get programmed in the hardware.
+ */
+
+ cq->bi.rx_buff = (struct idpf_dma_mem **)
+ idpf_calloc(hw, cq->ring_size,
+ sizeof(struct idpf_dma_mem *));
+ if (!cq->bi.rx_buff) {
+ ret_code = -ENOMEM;
+ /* TODO: Log an error message per CP */
+ goto err;
+ }
+
+ elem_size = qinfo->buf_size;
+ for (i = 0; i < cq->ring_size; i++) {
+ cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+ (hw, 1,
+ sizeof(struct idpf_dma_mem));
+ if (!cq->bi.rx_buff[i]) {
+ ret_code = -ENOMEM;
+ goto free_rx_buffs;
+ }
+ cq->bi.rx_buff[i]->va =
+ (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+ cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+ (i * elem_size);
+ cq->bi.rx_buff[i]->size = elem_size;
+ }
+ break;
+ case IDPF_CTLQ_TYPE_MAILBOX_TX:
+ case IDPF_CTLQ_TYPE_CONFIG_TX:
+ case IDPF_CTLQ_TYPE_RDMA_TX:
+ case IDPF_CTLQ_TYPE_RDMA_COMPL:
+ break;
+ default:
+ ret_code = -EBADR;
+ }
+
+ return ret_code;
+
+free_rx_buffs:
+ i--;
+ for (; i >= 0; i--)
+ idpf_free(hw, cq->bi.rx_buff[i]);
+
+	if (cq->bi.rx_buff)
+ idpf_free(hw, cq->bi.rx_buff);
+
+err:
+ return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+ int i = 0;
+
+ for (i = 0; i < cq->ring_size; i++) {
+ struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+ struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+ /* No buffer to post to descriptor, continue */
+ if (!bi)
+ continue;
+
+ desc->flags =
+ CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+ desc->opcode = 0;
+ desc->datalen = CPU_TO_LE16(bi->size);
+ desc->ret_val = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.indirect.addr_high =
+ CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+ desc->params.indirect.addr_low =
+ CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+ desc->params.indirect.param0 = 0;
+ desc->params.indirect.param1 = 0;
+ }
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+ /* set control queue registers in our local struct */
+ cq->reg.head = q_create_info->reg.head;
+ cq->reg.tail = q_create_info->reg.tail;
+ cq->reg.len = q_create_info->reg.len;
+ cq->reg.bah = q_create_info->reg.bah;
+ cq->reg.bal = q_create_info->reg.bal;
+ cq->reg.len_mask = q_create_info->reg.len_mask;
+ cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+ cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+ /* Update tail to post pre-allocated buffers for rx queues */
+ if (is_rxq)
+ wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL needs to be set */
+ if (cq->q_id != -1)
+ return;
+
+	/* Clear Head for both send and receive */
+ wr32(hw, cq->reg.head, 0);
+
+ /* set starting point */
+ wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+ wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+ wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+ int i;
+
+ if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+ cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+ for (i = 0; i < cq->ring_size; i++)
+ idpf_free(hw, cq->bi.rx_buff[i]);
+ /* free the buffer header */
+ idpf_free(hw, cq->bi.rx_buff);
+ } else {
+ idpf_free(hw, cq->bi.tx_msg);
+ }
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq_out parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+ struct idpf_ctlq_info **cq_out)
+{
+ struct idpf_ctlq_info *cq;
+ bool is_rxq = false;
+ int status = 0;
+
+ if (!qinfo->len || !qinfo->buf_size ||
+ qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+ qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
+
+ cq = (struct idpf_ctlq_info *)
+ idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+ if (!cq)
+ return -ENOMEM;
+
+ cq->cq_type = qinfo->type;
+ cq->q_id = qinfo->id;
+ cq->buf_size = qinfo->buf_size;
+ cq->ring_size = qinfo->len;
+
+ cq->next_to_use = 0;
+ cq->next_to_clean = 0;
+ cq->next_to_post = cq->ring_size - 1;
+
+ switch (qinfo->type) {
+ case IDPF_CTLQ_TYPE_EVENT_RX:
+ case IDPF_CTLQ_TYPE_CONFIG_RX:
+ case IDPF_CTLQ_TYPE_MAILBOX_RX:
+ is_rxq = true;
+ /* fallthrough */
+ case IDPF_CTLQ_TYPE_CONFIG_TX:
+ case IDPF_CTLQ_TYPE_MAILBOX_TX:
+ status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+ break;
+
+ default:
+ status = -EINVAL;
+ break;
+ }
+
+ if (status)
+ goto init_free_q;
+
+ if (is_rxq) {
+ cpfl_ctlq_init_rxq_bufs(cq);
+ } else {
+ /* Allocate the array of msg pointers for TX queues */
+ cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+ idpf_calloc(hw, qinfo->len,
+ sizeof(struct idpf_ctlq_msg *));
+ if (!cq->bi.tx_msg) {
+ status = -ENOMEM;
+ goto init_dealloc_q_mem;
+ }
+ }
+
+ cpfl_ctlq_setup_regs(cq, qinfo);
+
+ cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+ idpf_init_lock(&cq->cq_lock);
+
+ LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+ *cq_out = cq;
+ return status;
+
+init_dealloc_q_mem:
+ /* free ring buffers and the ring itself */
+ cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+ idpf_free(hw, cq);
+ cq = NULL;
+
+ return status;
+}
+
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+ struct idpf_ctlq_info **cq)
+{
+ return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shutdown the CQ
+ * The main shutdown routine for any control queue
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+ idpf_acquire_lock(&cq->cq_lock);
+
+ if (!cq->ring_size)
+ goto shutdown_sq_out;
+
+ /* free ring buffers and the ring itself */
+ cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+ /* Set ring_size to 0 to indicate uninitialized queue */
+ cq->ring_size = 0;
+
+shutdown_sq_out:
+ idpf_release_lock(&cq->cq_lock);
+ idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+ LIST_REMOVE(cq, cq_list);
+ cpfl_ctlq_shutdown(hw, cq);
+ idpf_free(hw, cq);
+}
+
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+ cpfl_ctlq_remove(hw, cq);
+}
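For context, a sketch of the DMA geometry that cpfl_check_dma_mem_parameters() above expects for a config RX queue. It is not part of the patch; how the ring/buffer regions are actually obtained from the CP is outside this file, so the va/pa fields are assumed to be filled by the caller:

#include "cpfl_controlq.h"
#include "base/idpf_controlq.h"

/* Illustrative only: size a CONFIG_RX queue so that it passes the checks
 * in cpfl_check_dma_mem_parameters(). ring_mem.va/pa and buf_mem.va/pa
 * must point at CP-provided DMA memory before cpfl_vport_ctlq_add().
 */
static void
example_size_cfgq_rx(struct cpfl_ctlq_create_info *qinfo)
{
	qinfo->type = IDPF_CTLQ_TYPE_CONFIG_RX;
	qinfo->len = CPFL_CFGQ_RING_LEN;
	qinfo->buf_size = CPFL_CTLQ_CFGQ_BUFFER_SIZE;

	/* Descriptor ring: one idpf_ctlq_desc per ring entry. */
	qinfo->ring_mem.size = qinfo->len * sizeof(struct idpf_ctlq_desc);

	/* One large RX buffer region, later split into per-descriptor
	 * buffers by cpfl_ctlq_alloc_ring_res().
	 */
	qinfo->buf_mem.size = CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE;
}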
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..930d717f63
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE 32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE 4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE 256
+#define CPFL_DFLT_MBX_RING_LEN 512
+#define CPFL_CFGQ_RING_LEN 512
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+ enum idpf_ctlq_type type;
+ int id; /* absolute queue offset passed as input
+ * -1 for default mailbox if present
+ */
+ uint16_t len; /* Queue length passed as input */
+ uint16_t buf_size; /* buffer size passed as input */
+ uint64_t base_address; /* output, HPA of the Queue start */
+ struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+ /* Pass down previously allocated descriptor ring and buffer memory
+ * for each control queue to be created
+ */
+ struct idpf_dma_mem ring_mem;
+ /* The CP will allocate one large buffer that the CPFlib will piece
+ * into individual buffers for each descriptor
+ */
+ struct idpf_dma_mem buf_mem;
+
+ int ext_info_size;
+ void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq,
+ struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+ struct cpfl_ctlq_create_info *qinfo,
+ struct idpf_ctlq_info **cq);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+ struct cpfl_ctlq_create_info *qinfo,
+ struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+#endif
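A minimal add/remove lifecycle sketch for the API declared above (illustrative, not part of the patch); error handling is trimmed and 'qinfo' is assumed to already carry correctly sized, CP-allocated ring and buffer DMA memory as checked in cpfl_controlq.c:

#include "cpfl_controlq.h"

/* Illustrative only. */
static int
example_cfgq_lifecycle(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo)
{
	struct idpf_ctlq_info *cq = NULL;
	int ret;

	ret = cpfl_vport_ctlq_add(hw, qinfo, &cq);
	if (ret)
		return ret;	/* -EINVAL, -ENOMEM or -EBADR from the add path */

	/* ... post rule descriptors built by cpfl_prep_rule_desc() here ... */

	cpfl_vport_ctlq_remove(hw, cq);
	return 0;
}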
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..eefae1767c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - build the common context bits for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+ uint64_t context = 0;
+
+ switch (cmn_cfg->opc) {
+ case cpfl_ctlq_mod_query_rule:
+ case cpfl_ctlq_mod_add_update_rule:
+ /* fallthrough */
+ case cpfl_ctlq_sem_query_rule_hash_addr:
+ case cpfl_ctlq_sem_query_del_rule_hash_addr:
+ case cpfl_ctlq_sem_add_rule:
+ case cpfl_ctlq_sem_del_rule:
+ case cpfl_ctlq_sem_query_rule:
+ case cpfl_ctlq_sem_update_rule:
+ context |= SHIFT_VAL64(cmn_cfg->time_sel,
+ MEV_RULE_TIME_SEL);
+ context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+ MEV_RULE_TIME_SEL_VAL);
+ context |= SHIFT_VAL64(cmn_cfg->host_id,
+ MEV_RULE_HOST_ID);
+ context |= SHIFT_VAL64(cmn_cfg->port_num,
+ MEV_RULE_PORT_NUM);
+ context |= SHIFT_VAL64(cmn_cfg->resp_req,
+ MEV_RULE_RESP_REQ);
+ context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+ MEV_RULE_CACHE_WR_THRU);
+ break;
+ default:
+ break;
+ }
+
+ return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - build the context bits for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+ uint64_t context = 0;
+
+ context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+ switch (cfg_data->common.opc) {
+ case cpfl_ctlq_mod_query_rule:
+ case cpfl_ctlq_mod_add_update_rule:
+ context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+ MEV_RULE_MOD_OBJ_SIZE);
+ context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+ MEV_RULE_PIN_MOD_CONTENT);
+ context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+ MEV_RULE_MOD_INDEX);
+ break;
+ case cpfl_ctlq_sem_query_rule_hash_addr:
+ case cpfl_ctlq_sem_query_del_rule_hash_addr:
+ context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+ MEV_RULE_OBJ_ID);
+ context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+ MEV_RULE_OBJ_ADDR);
+ break;
+ default:
+ break;
+ }
+
+ return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ *
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+ struct idpf_ctlq_msg *ctlq_msg)
+{
+ uint64_t context;
+ uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+ context = cpfl_prep_rule_desc_ctx(cfg_data);
+ *ctlq_ctx = CPU_TO_LE64(context);
+ memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+ ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+ ctlq_msg->data_len = cfg_data->common.buf_len;
+ ctlq_msg->status = 0;
+ ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+ uint8_t key_byte_len,
+ const uint8_t *act_bytes,
+ uint8_t act_byte_len,
+ uint16_t cfg_ctrl,
+ union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+ uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+ const uint32_t *act_src = (const uint32_t *)act_bytes;
+ uint32_t i;
+
+ idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+ idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+ CPFL_NONDMA_TO_DMA);
+
+ for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+ *act_dst++ = CPU_TO_LE32(*act_src++);
+
+ *((uint16_t *)&rule_blob->sem_rule.cfg_ctrl) = CPU_TO_LE16(cfg_ctrl);
+}
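As a counterpart to the SEM path, a sketch of the MOD-rule path handled by cpfl_prep_rule_desc_ctx() above (illustrative only); 'payload' is assumed to already hold the modification content blob and the zeroed identifiers are placeholders:

#include "cpfl_rules.h"

/* Illustrative only: prepare a MOD add/update descriptor. */
static void
example_prep_mod_rule(struct idpf_dma_mem *payload, uint32_t mod_index,
		      struct idpf_ctlq_msg *msg)
{
	struct cpfl_rule_cfg_data cfg = { 0 };

	cpfl_fill_rule_mod_content(0 /* obj_size */, 0 /* pin_content */,
				   mod_index, &cfg.ext.mod_content);
	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
				       0 /* cookie */, 0 /* vsi_id */,
				       0 /* port */, 0 /* host */,
				       0, 0, 0, 1 /* resp_req */,
				       (uint16_t)payload->size, payload,
				       &cfg.common);
	cpfl_prep_rule_desc(&cfg, msg);
}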
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b) (1 << (b))
+
+#define MAKE_MASK(type, mask, shift) ((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field) \
+ (((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field) \
+ (((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len) (((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len) MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len) MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift) MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift) MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift) MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift) \
+ ((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift) MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift) MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift) MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field) SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field) SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field) SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+ cpfl_ctlq_sem_add_rule = 0x1303,
+ cpfl_ctlq_sem_update_rule = 0x1304,
+ cpfl_ctlq_sem_del_rule = 0x1305,
+ cpfl_ctlq_sem_query_rule = 0x1306,
+ cpfl_ctlq_sem_query_rule_hash_addr = 0x1307,
+ cpfl_ctlq_sem_query_del_rule_hash_addr = 0x1308,
+
+ cpfl_ctlq_mod_add_update_rule = 0x1360,
+ cpfl_ctlq_mod_query_rule = 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+ CPFL_CFG_PKT_ERR_OK = 0,
+ CPFL_CFG_PKT_ERR_ESRCH = 1, /* Bad opcode */
+ CPFL_CFG_PKT_ERR_EEXIST = 2, /* Entry Already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4, /* No space left in the table */
+ CPFL_CFG_PKT_ERR_ERANGE = 5, /* Parameter out of range */
+ CPFL_CFG_PKT_ERR_ESBCOMP = 6, /* Completion Error */
+ CPFL_CFG_PKT_ERR_ENOPIN = 7, /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8, /* Entry does not exist */
+ CPFL_CFG_PKT_ERR_EMAXCOL = 9 /* Max Hash Collision */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S 0
+#define MEV_RULE_VSI_ID_M \
+ MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S 13
+#define MEV_RULE_TIME_SEL_M \
+ MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S 15
+#define MEV_RULE_TIME_SEL_VAL_M \
+ MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S 16
+#define MEV_RULE_HOST_ID_S 18
+#define MEV_RULE_PORT_NUM_M \
+ MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M \
+ MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S 21
+#define MEV_RULE_CACHE_WR_THRU_M \
+ MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S 22
+#define MEV_RULE_RESP_REQ_M \
+ MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S 24
+#define MEV_RULE_OBJ_ADDR_M \
+ MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S 59
+#define MEV_RULE_OBJ_ID_M \
+ MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S 0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M \
+ MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S 11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M \
+ MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S 13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M \
+ MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S 14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M \
+ MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S 15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M \
+ MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameters.
+ */
+/* build SEM CFG_CTRL */
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id, \
+ pin_to_cache, fixed_fetch) \
+ (SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID) | \
+ SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID) | \
+ SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE) | \
+ SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL */
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror) \
+ (SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID) | \
+ SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE) | \
+ SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S 24
+#define MEV_RULE_MOD_INDEX_M \
+ MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S 62
+#define MEV_RULE_PIN_MOD_CONTENT_M \
+ MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S 63
+#define MEV_RULE_MOD_OBJ_SIZE_M \
+ MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format; the rest of the
+ * members are in little endian.
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+ uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+ uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+ /* Bit(s):
+ * 10:0 : PROFILE_ID
+ * 12:11: SUB_PROF_ID (used for SEM only)
+ * 13 : pin the SEM key content into the cache
+ * 14 : Reserved
+ * 15 : Fixed_fetch
+ */
+ uint8_t cfg_ctrl[2];
+
+ /* Bit(s):
+ * 0: valid
+ * 15:1: Hints
+ * 26:16: PROFILE_ID, the profile associated with the entry
+ * 31:27: PF
+ * 55:32: FLOW ID (assigned by HW)
+ * 63:56: EPOCH
+ */
+ uint8_t ctrl_word[8];
+ uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+ struct cpfl_sem_rule_cfg_pkt sem_rule;
+ uint8_t pkt_data[256];
+ uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+ uint8_t obj_id;
+ uint32_t obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+ uint8_t obj_id;
+ uint32_t obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+ uint8_t obj_size;
+ uint8_t pin_content;
+ uint32_t index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+ enum cpfl_ctlq_rule_cfg_opc opc;
+ uint64_t cookie;
+ uint16_t vsi_id;
+ uint8_t port_num;
+ uint8_t host_id;
+ uint8_t time_sel;
+ uint8_t time_sel_val;
+ uint8_t cache_wr_thru;
+ uint8_t resp_req;
+ uint32_t ret_val;
+ uint16_t buf_len;
+ struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending a rule to HW, the caller needs to fill
+ * in this struct and then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+ struct cpfl_rule_cfg_data_common common;
+ union {
+ struct cpfl_rule_query_addr query_addr;
+ struct cpfl_rule_query_del_addr query_del_addr;
+ struct cpfl_rule_mod_content mod_content;
+ } ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+ uint8_t pin_mod_content,
+ uint32_t mod_index,
+ struct cpfl_rule_mod_content *mod_content)
+{
+ mod_content->obj_size = mod_obj_size;
+ mod_content->pin_content = pin_mod_content;
+ mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calling cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+ uint64_t cookie,
+ uint16_t vsi_id,
+ uint8_t port_num,
+ uint8_t host_id,
+ uint8_t time_sel,
+ uint8_t time_sel_val,
+ uint8_t cache_wr_thru,
+ uint8_t resp_req,
+ uint16_t payload_len,
+ struct idpf_dma_mem *payload,
+ struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+ cfg_cmn->opc = opc;
+ cfg_cmn->cookie = cookie;
+ cfg_cmn->vsi_id = vsi_id;
+ cfg_cmn->port_num = port_num;
+ cfg_cmn->resp_req = resp_req;
+ cfg_cmn->ret_val = 0;
+ cfg_cmn->host_id = host_id;
+ cfg_cmn->time_sel = time_sel;
+ cfg_cmn->time_sel_val = time_sel_val;
+ cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+ cfg_cmn->buf_len = payload_len;
+ cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+ struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+ uint8_t key_byte_len,
+ const uint8_t *act_bytes,
+ uint8_t act_byte_len,
+ uint16_t cfg_ctrl,
+ union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
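Finally, a one-line sketch of how the CFG_CTRL helper macros above feed cpfl_prep_sem_rule_blob(); the profile IDs and the pin/fetch flag values are placeholders, not values mandated by this header:

#include "cpfl_rules.h"

/* Illustrative only: pin the SEM key into the cache, no fixed fetch. */
static uint16_t
example_sem_cfg_ctrl(uint16_t prof_id, uint8_t sub_prof_id)
{
	return CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,
					      1 /* pin_to_cache */,
					      0 /* fixed_fetch */);
}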
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 7b8d043011..9a8d25ffae 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -18,6 +18,7 @@ sources = files(
'cpfl_rxtx.c',
'cpfl_representor.c',
'cpfl_vchnl.c',
+ 'cpfl_controlq.c',
)
if arch_subdir == 'x86'
@@ -43,6 +44,7 @@ js_dep = dependency('json-c', required: false, method : 'pkg-config')
if js_dep.found()
sources += files(
'cpfl_flow_parser.c',
+ 'cpfl_rules.c',
)
dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
ext_deps += js_dep
--
2.25.1