From: Rosen Xu <rosen.xu@intel.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, tianfei.zhang@intel.com,
dan.wei@intel.com, rosen.xu@intel.com, andy.pei@intel.com,
qiming.yang@intel.com, haiyue.wang@intel.com,
santos.chen@intel.com, zhang.zhang@intel.com,
david.lomartire@intel.com, jia.hu@intel.com
Subject: [dpdk-dev] [PATCH v8 05/14] net/ipn3ke: add IPN3KE TM of PMD driver
Date: Mon, 15 Apr 2019 13:06:54 +0800
Message-ID: <1555304823-91456-6-git-send-email-rosen.xu@intel.com>
In-Reply-To: <1555304823-91456-1-git-send-email-rosen.xu@intel.com>
Add traffic management (TM) support to the IPN3KE PMD for the Intel FPGA Acceleration NIC.
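
The TM ops are exposed to applications through the generic rte_tm API. A
minimal, hypothetical call sequence is sketched below; the port, node and
level IDs are placeholders, and in this driver a node's private shaper
profile ID must equal its node ID (see ipn3ke_tm_node_add()):

    struct rte_tm_error err;
    /* example peak rate; the peak burst size must stay zero */
    struct rte_tm_shaper_params sp = { .peak = { .rate = 1000000 } };
    /* the private shaper profile ID must match the node ID */
    struct rte_tm_node_params np = { .shaper_profile_id = node_id };

    /* add the shaper profile and the node, then commit to the FPGA */
    rte_tm_shaper_profile_add(port_id, node_id, &sp, &err);
    rte_tm_node_add(port_id, node_id, parent_id, 0, 1, level, &np, &err);
    rte_tm_hierarchy_commit(port_id, 1, &err);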
Signed-off-by: Rosen Xu <rosen.xu@intel.com>
Signed-off-by: Andy Pei <andy.pei@intel.com>
Signed-off-by: Dan Wei <dan.wei@intel.com>
---
drivers/net/ipn3ke/Makefile | 1 +
drivers/net/ipn3ke/ipn3ke_ethdev.c | 3 +
drivers/net/ipn3ke/ipn3ke_ethdev.h | 7 +
drivers/net/ipn3ke/ipn3ke_representor.c | 5 +
drivers/net/ipn3ke/ipn3ke_tm.c | 2068 +++++++++++++++++++++++++++++++
drivers/net/ipn3ke/meson.build | 3 +-
6 files changed, 2086 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ipn3ke/ipn3ke_tm.c
diff --git a/drivers/net/ipn3ke/Makefile b/drivers/net/ipn3ke/Makefile
index 221567d..38d9384 100644
--- a/drivers/net/ipn3ke/Makefile
+++ b/drivers/net/ipn3ke/Makefile
@@ -34,5 +34,6 @@ LIBABIVER := 1
#
SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_representor.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_tm.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ipn3ke/ipn3ke_ethdev.c b/drivers/net/ipn3ke/ipn3ke_ethdev.c
index 58c8ce4..508ea01 100644
--- a/drivers/net/ipn3ke/ipn3ke_ethdev.c
+++ b/drivers/net/ipn3ke/ipn3ke_ethdev.c
@@ -262,6 +262,9 @@
hw->flow_hw_enable = 0;
if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
+ ret = ipn3ke_hw_tm_init(hw);
+ if (ret)
+ return ret;
hw->tm_hw_enable = 1;
hw->flow_hw_enable = 1;
}
diff --git a/drivers/net/ipn3ke/ipn3ke_ethdev.h b/drivers/net/ipn3ke/ipn3ke_ethdev.h
index d2c73e5..36ff2f8 100644
--- a/drivers/net/ipn3ke/ipn3ke_ethdev.h
+++ b/drivers/net/ipn3ke/ipn3ke_ethdev.h
@@ -552,6 +552,13 @@ static inline void _ipn3ke_indrct_write(struct ipn3ke_hw *hw,
ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params);
int
ipn3ke_rpst_uninit(struct rte_eth_dev *ethdev);
+int
+ipn3ke_hw_tm_init(struct ipn3ke_hw *hw);
+void
+ipn3ke_tm_init(struct ipn3ke_rpst *rpst);
+int
+ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev,
+ void *arg);
/* IPN3KE_MASK is a macro used on 32 bit registers */
diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c
index 3831982..63098bf 100644
--- a/drivers/net/ipn3ke/ipn3ke_representor.c
+++ b/drivers/net/ipn3ke/ipn3ke_representor.c
@@ -801,6 +801,8 @@
.allmulticast_disable = ipn3ke_rpst_allmulticast_disable,
.mac_addr_set = ipn3ke_rpst_mac_addr_set,
.mtu_set = ipn3ke_rpst_mtu_set,
+
+ .tm_ops_get = ipn3ke_tm_ops_get,
};
static uint16_t ipn3ke_rpst_recv_pkts(__rte_unused void *rx_q,
@@ -840,6 +842,9 @@ static uint16_t ipn3ke_rpst_recv_pkts(__rte_unused void *rx_q,
return -ENODEV;
}
+ if (rpst->hw->tm_hw_enable)
+ ipn3ke_tm_init(rpst);
+
/* Set representor device ops */
ethdev->dev_ops = &ipn3ke_rpst_dev_ops;
diff --git a/drivers/net/ipn3ke/ipn3ke_tm.c b/drivers/net/ipn3ke/ipn3ke_tm.c
new file mode 100644
index 0000000..8baa2fb
--- /dev/null
+++ b/drivers/net/ipn3ke/ipn3ke_tm.c
@@ -0,0 +1,2068 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+#include <rte_tm_driver.h>
+
+#include <rte_mbuf.h>
+#include <rte_sched.h>
+#include <rte_ethdev_driver.h>
+
+#include <rte_io.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_bus_ifpga.h>
+#include <ifpga_logs.h>
+
+#include "ipn3ke_rawdev_api.h"
+#include "ipn3ke_logs.h"
+#include "ipn3ke_ethdev.h"
+
+#define BYTES_IN_MBPS (1000 * 1000 / 8)
+#define SUBPORT_TC_PERIOD 10
+#define PIPE_TC_PERIOD 40
+
+struct ipn3ke_tm_shaper_params_range_type {
+ uint32_t m1;
+ uint32_t m2;
+ uint32_t exp;
+ uint32_t exp2;
+ uint32_t low;
+ uint32_t high;
+};
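+
+/*
+ * Each row maps a shaper rate range [low, high] to the mantissa range
+ * [m1, m2] and exponent used by the hardware; exp2 is 2^exp and is used
+ * when translating a rate into the (m, e) register encoding below.
+ */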
+struct ipn3ke_tm_shaper_params_range_type ipn3ke_tm_shaper_params_rang[] = {
+ { 0, 1, 0, 1, 0, 4},
+ { 2, 3, 0, 1, 8, 12},
+ { 4, 7, 0, 1, 16, 28},
+ { 8, 15, 0, 1, 32, 60},
+ { 16, 31, 0, 1, 64, 124},
+ { 32, 63, 0, 1, 128, 252},
+ { 64, 127, 0, 1, 256, 508},
+ {128, 255, 0, 1, 512, 1020},
+ {256, 511, 0, 1, 1024, 2044},
+ {512, 1023, 0, 1, 2048, 4092},
+ {512, 1023, 1, 2, 4096, 8184},
+ {512, 1023, 2, 4, 8192, 16368},
+ {512, 1023, 3, 8, 16384, 32736},
+ {512, 1023, 4, 16, 32768, 65472},
+ {512, 1023, 5, 32, 65536, 130944},
+ {512, 1023, 6, 64, 131072, 261888},
+ {512, 1023, 7, 128, 262144, 523776},
+ {512, 1023, 8, 256, 524288, 1047552},
+ {512, 1023, 9, 512, 1048576, 2095104},
+ {512, 1023, 10, 1024, 2097152, 4190208},
+ {512, 1023, 11, 2048, 4194304, 8380416},
+ {512, 1023, 12, 4096, 8388608, 16760832},
+ {512, 1023, 13, 8192, 16777216, 33521664},
+ {512, 1023, 14, 16384, 33554432, 67043328},
+ {512, 1023, 15, 32768, 67108864, 134086656},
+};
+
+#define IPN3KE_TM_SHAPER_RANGE_NUM (sizeof(ipn3ke_tm_shaper_params_rang) / \
+ sizeof(struct ipn3ke_tm_shaper_params_range_type))
+
+#define IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX \
+ (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
+
+#define IPN3KE_TM_SHAPER_PEAK_RATE_MAX \
+ (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
+
+int
+ipn3ke_hw_tm_init(struct ipn3ke_hw *hw)
+{
+#define SCRATCH_DATA 0xABCDEF
+ struct ipn3ke_tm_node *nodes;
+ struct ipn3ke_tm_tdrop_profile *tdrop_profile;
+ int node_num;
+ int i;
+
+ if (hw == NULL)
+ return -EINVAL;
+#if IPN3KE_TM_SCRATCH_RW
+ uint32_t scratch_data;
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_TM_SCRATCH,
+ 0,
+ SCRATCH_DATA,
+ 0xFFFFFFFF);
+ scratch_data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_TM_SCRATCH,
+ 0,
+ 0xFFFFFFFF);
+ if (scratch_data != SCRATCH_DATA)
+ return -EINVAL;
+#endif
+ /* alloc memory for all hierarchy nodes */
+ node_num = hw->port_num +
+ IPN3KE_TM_VT_NODE_NUM +
+ IPN3KE_TM_COS_NODE_NUM;
+
+ nodes = rte_zmalloc("ipn3ke_tm_nodes",
+ sizeof(struct ipn3ke_tm_node) * node_num,
+ 0);
+ if (!nodes)
+ return -ENOMEM;
+
+ /* alloc memory for Tail Drop Profile */
+ tdrop_profile = rte_zmalloc("ipn3ke_tm_tdrop_profile",
+ sizeof(struct ipn3ke_tm_tdrop_profile) *
+ IPN3KE_TM_TDROP_PROFILE_NUM,
+ 0);
+ if (!tdrop_profile) {
+ rte_free(nodes);
+ return -ENOMEM;
+ }
+
+ hw->nodes = nodes;
+ hw->port_nodes = nodes;
+ hw->vt_nodes = hw->port_nodes + hw->port_num;
+ hw->cos_nodes = hw->vt_nodes + IPN3KE_TM_VT_NODE_NUM;
+ hw->tdrop_profile = tdrop_profile;
+ hw->tdrop_profile_num = IPN3KE_TM_TDROP_PROFILE_NUM;
+
+ for (i = 0, nodes = hw->port_nodes;
+ i < hw->port_num;
+ i++, nodes++) {
+ nodes->node_index = i;
+ nodes->level = IPN3KE_TM_NODE_LEVEL_PORT;
+ nodes->tm_id = RTE_TM_NODE_ID_NULL;
+ nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
+ nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ nodes->weight = 0;
+ nodes->parent_node = NULL;
+ nodes->shaper_profile.valid = 0;
+ nodes->tdrop_profile = NULL;
+ nodes->n_children = 0;
+ TAILQ_INIT(&nodes->children_node_list);
+ }
+
+ for (i = 0, nodes = hw->vt_nodes;
+ i < IPN3KE_TM_VT_NODE_NUM;
+ i++, nodes++) {
+ nodes->node_index = i;
+ nodes->level = IPN3KE_TM_NODE_LEVEL_VT;
+ nodes->tm_id = RTE_TM_NODE_ID_NULL;
+ nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
+ nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ nodes->weight = 0;
+ nodes->parent_node = NULL;
+ nodes->shaper_profile.valid = 0;
+ nodes->tdrop_profile = NULL;
+ nodes->n_children = 0;
+ TAILQ_INIT(&nodes->children_node_list);
+ }
+
+ for (i = 0, nodes = hw->cos_nodes;
+ i < IPN3KE_TM_COS_NODE_NUM;
+ i++, nodes++) {
+ nodes->node_index = i;
+ nodes->level = IPN3KE_TM_NODE_LEVEL_COS;
+ nodes->tm_id = RTE_TM_NODE_ID_NULL;
+ nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
+ nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ nodes->weight = 0;
+ nodes->parent_node = NULL;
+ nodes->shaper_profile.valid = 0;
+ nodes->tdrop_profile = NULL;
+ nodes->n_children = 0;
+ TAILQ_INIT(&nodes->children_node_list);
+ }
+
+ for (i = 0, tdrop_profile = hw->tdrop_profile;
+ i < IPN3KE_TM_TDROP_PROFILE_NUM;
+ i++, tdrop_profile++) {
+ tdrop_profile->tdrop_profile_id = i;
+ tdrop_profile->n_users = 0;
+ tdrop_profile->valid = 0;
+ }
+
+ return 0;
+}
+
+void
+ipn3ke_tm_init(struct ipn3ke_rpst *rpst)
+{
+ struct ipn3ke_tm_internals *tm;
+ struct ipn3ke_tm_node *port_node;
+
+ tm = &rpst->tm;
+
+ port_node = &rpst->hw->port_nodes[rpst->port_id];
+ tm->h.port_node = port_node;
+
+ tm->h.n_shaper_profiles = 0;
+ tm->h.n_tdrop_profiles = 0;
+ tm->h.n_vt_nodes = 0;
+ tm->h.n_cos_nodes = 0;
+
+ tm->h.port_commit_node = NULL;
+ TAILQ_INIT(&tm->h.vt_commit_node_list);
+ TAILQ_INIT(&tm->h.cos_commit_node_list);
+
+ tm->hierarchy_frozen = 0;
+ tm->tm_started = 1;
+ tm->tm_id = rpst->port_id;
+}
+
+static struct ipn3ke_tm_shaper_profile *
+ipn3ke_hw_tm_shaper_profile_search(struct ipn3ke_hw *hw,
+ uint32_t shaper_profile_id, struct rte_tm_error *error)
+{
+ struct ipn3ke_tm_shaper_profile *sp = NULL;
+ uint32_t level_of_node_id;
+ uint32_t node_index;
+
+ /* Shaper profile ID must not be NONE. */
+ if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return NULL;
+ }
+
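+ /*
+ * Shaper profile IDs reuse the node ID encoding: the quotient by
+ * IPN3KE_TM_NODE_LEVEL_MOD selects the hierarchy level and the
+ * remainder selects the node index, since every node owns a
+ * private shaper profile.
+ */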
+ level_of_node_id = shaper_profile_id / IPN3KE_TM_NODE_LEVEL_MOD;
+ node_index = shaper_profile_id % IPN3KE_TM_NODE_LEVEL_MOD;
+
+ switch (level_of_node_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ if (node_index >= hw->port_num)
+ rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ else
+ sp = &hw->port_nodes[node_index].shaper_profile;
+
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_index >= IPN3KE_TM_VT_NODE_NUM)
+ rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ else
+ sp = &hw->vt_nodes[node_index].shaper_profile;
+
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_index >= IPN3KE_TM_COS_NODE_NUM)
+ rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ else
+ sp = &hw->cos_nodes[node_index].shaper_profile;
+
+ break;
+ default:
+ rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ }
+
+ return sp;
+}
+
+static struct ipn3ke_tm_tdrop_profile *
+ipn3ke_hw_tm_tdrop_profile_search(struct ipn3ke_hw *hw,
+ uint32_t tdrop_profile_id)
+{
+ struct ipn3ke_tm_tdrop_profile *tdrop_profile;
+
+ if (tdrop_profile_id >= hw->tdrop_profile_num)
+ return NULL;
+
+ tdrop_profile = &hw->tdrop_profile[tdrop_profile_id];
+ if (tdrop_profile->valid)
+ return tdrop_profile;
+
+ return NULL;
+}
+
+static struct ipn3ke_tm_node *
+ipn3ke_hw_tm_node_search(struct ipn3ke_hw *hw, uint32_t tm_id,
+ uint32_t node_id, uint32_t state_mask)
+{
+ uint32_t level_of_node_id;
+ uint32_t node_index;
+ struct ipn3ke_tm_node *n;
+
+ level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
+ node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
+
+ switch (level_of_node_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ if (node_index >= hw->port_num)
+ return NULL;
+ n = &hw->port_nodes[node_index];
+
+ break;
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_index >= IPN3KE_TM_VT_NODE_NUM)
+ return NULL;
+ n = &hw->vt_nodes[node_index];
+
+ break;
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_index >= IPN3KE_TM_COS_NODE_NUM)
+ return NULL;
+ n = &hw->cos_nodes[node_index];
+
+ break;
+ default:
+ return NULL;
+ }
+
+ /* Check tm node status */
+ if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE) {
+ if (n->tm_id != RTE_TM_NODE_ID_NULL ||
+ n->parent_node_id != RTE_TM_NODE_ID_NULL ||
+ n->parent_node != NULL ||
+ n->n_children > 0) {
+ IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
+ }
+ } else if (n->node_state < IPN3KE_TM_NODE_STATE_MAX) {
+ if (n->tm_id == RTE_TM_NODE_ID_NULL ||
+ (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
+ n->parent_node_id == RTE_TM_NODE_ID_NULL) ||
+ (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
+ n->parent_node == NULL)) {
+ IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
+ }
+ } else {
+ IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
+ }
+
+ if (IPN3KE_BIT_ISSET(state_mask, n->node_state)) {
+ if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE)
+ return n;
+ else if (n->tm_id == tm_id)
+ return n;
+ else
+ return NULL;
+ } else {
+ return NULL;
+ }
+}
+
+/* Traffic manager node type get */
+static int
+ipn3ke_pmd_tm_node_type_get(struct rte_eth_dev *dev,
+ uint32_t node_id, int *is_leaf, struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node *node;
+ uint32_t state_mask;
+
+ if (is_leaf == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ tm_id = tm->tm_id;
+
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
+ node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
+ if (node_id == RTE_TM_NODE_ID_NULL ||
+ node == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ *is_leaf = (node->level == IPN3KE_TM_NODE_LEVEL_COS) ? 1 : 0;
+
+ return 0;
+}
+
+#define WRED_SUPPORTED 0
+
+#define STATS_MASK_DEFAULT \
+ (RTE_TM_STATS_N_PKTS | \
+ RTE_TM_STATS_N_BYTES | \
+ RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
+ RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
+
+#define STATS_MASK_QUEUE \
+ (STATS_MASK_DEFAULT | RTE_TM_STATS_N_PKTS_QUEUED)
+
+/* Traffic manager capabilities get */
+static int
+ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap, struct rte_tm_error *error)
+{
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(*cap));
+
+ cap->n_nodes_max = 1 + IPN3KE_TM_COS_NODE_NUM + IPN3KE_TM_VT_NODE_NUM;
+ cap->n_levels_max = IPN3KE_TM_NODE_LEVEL_MAX;
+
+ cap->non_leaf_nodes_identical = 0;
+ cap->leaf_nodes_identical = 1;
+
+ cap->shaper_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
+ cap->shaper_private_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
+ cap->shaper_private_dual_rate_n_max = 0;
+ cap->shaper_private_rate_min = 1;
+ cap->shaper_private_rate_max = 1 + IPN3KE_TM_VT_NODE_NUM;
+
+ cap->shaper_shared_n_max = 0;
+ cap->shaper_shared_n_nodes_per_shaper_max = 0;
+ cap->shaper_shared_n_shapers_per_node_max = 0;
+ cap->shaper_shared_dual_rate_n_max = 0;
+ cap->shaper_shared_rate_min = 0;
+ cap->shaper_shared_rate_max = 0;
+
+ cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+
+ cap->sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->sched_sp_n_priorities_max = 3;
+ cap->sched_wfq_n_children_per_group_max = UINT32_MAX;
+ cap->sched_wfq_n_groups_max = 1;
+ cap->sched_wfq_weight_max = UINT32_MAX;
+
+ cap->cman_wred_packet_mode_supported = 0;
+ cap->cman_wred_byte_mode_supported = 0;
+ cap->cman_head_drop_supported = 0;
+ cap->cman_wred_context_n_max = 0;
+ cap->cman_wred_context_private_n_max = 0;
+ cap->cman_wred_context_shared_n_max = 0;
+ cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+ cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+
+ /**
+ * cap->mark_vlan_dei_supported = {0, 0, 0};
+ * cap->mark_ip_ecn_tcp_supported = {0, 0, 0};
+ * cap->mark_ip_ecn_sctp_supported = {0, 0, 0};
+ * cap->mark_ip_dscp_supported = {0, 0, 0};
+ */
+
+ cap->dynamic_update_mask = 0;
+
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+/* Traffic manager level capabilities get */
+static int
+ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id, struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (level_id >= IPN3KE_TM_NODE_LEVEL_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(*cap));
+
+ switch (level_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ cap->n_nodes_max = hw->port_num;
+ cap->n_nodes_nonleaf_max = IPN3KE_TM_VT_NODE_NUM;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = 0;
+ cap->leaf_nodes_identical = 0;
+
+ cap->nonleaf.shaper_private_supported = 0;
+ cap->nonleaf.shaper_private_dual_rate_supported = 0;
+ cap->nonleaf.shaper_private_rate_min = 1;
+ cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
+ cap->nonleaf.shaper_shared_n_max = 0;
+
+ cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 0;
+
+ cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ cap->n_nodes_max = IPN3KE_TM_VT_NODE_NUM;
+ cap->n_nodes_nonleaf_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = 0;
+ cap->leaf_nodes_identical = 0;
+
+ cap->nonleaf.shaper_private_supported = 0;
+ cap->nonleaf.shaper_private_dual_rate_supported = 0;
+ cap->nonleaf.shaper_private_rate_min = 1;
+ cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
+ cap->nonleaf.shaper_shared_n_max = 0;
+
+ cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 0;
+
+ cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ cap->n_nodes_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->non_leaf_nodes_identical = 0;
+ cap->leaf_nodes_identical = 0;
+
+ cap->leaf.shaper_private_supported = 0;
+ cap->leaf.shaper_private_dual_rate_supported = 0;
+ cap->leaf.shaper_private_rate_min = 0;
+ cap->leaf.shaper_private_rate_max = 0;
+ cap->leaf.shaper_shared_n_max = 0;
+
+ cap->leaf.cman_head_drop_supported = 0;
+ cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
+ cap->leaf.cman_wred_byte_mode_supported = 0;
+ cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+
+ cap->leaf.stats_mask = STATS_MASK_QUEUE;
+ break;
+
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+ }
+
+ return 0;
+}
+
+/* Traffic manager node capabilities get */
+static int
+ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id, struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_rpst *representor = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node *tm_node;
+ uint32_t state_mask;
+
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ tm_id = tm->tm_id;
+
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
+ tm_node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
+ if (tm_node == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (tm_node->tm_id != representor->port_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(*cap));
+
+ switch (tm_node->level) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ cap->shaper_private_supported = 1;
+ cap->shaper_private_dual_rate_supported = 0;
+ cap->shaper_private_rate_min = 1;
+ cap->shaper_private_rate_max = UINT32_MAX;
+ cap->shaper_shared_n_max = 0;
+
+ cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ IPN3KE_TM_VT_NODE_NUM;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+
+ cap->stats_mask = STATS_MASK_DEFAULT;
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ cap->shaper_private_supported = 1;
+ cap->shaper_private_dual_rate_supported = 0;
+ cap->shaper_private_rate_min = 1;
+ cap->shaper_private_rate_max = UINT32_MAX;
+ cap->shaper_shared_n_max = 0;
+
+ cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ IPN3KE_TM_COS_NODE_NUM;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+
+ cap->stats_mask = STATS_MASK_DEFAULT;
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ cap->shaper_private_supported = 0;
+ cap->shaper_private_dual_rate_supported = 0;
+ cap->shaper_private_rate_min = 0;
+ cap->shaper_private_rate_max = 0;
+ cap->shaper_shared_n_max = 0;
+
+ cap->leaf.cman_head_drop_supported = 0;
+ cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
+ cap->leaf.cman_wred_byte_mode_supported = 0;
+ cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+
+ cap->stats_mask = STATS_MASK_QUEUE;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
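+/*
+ * Translate a shaper rate into the (mantissa, exponent) pair written to
+ * the QOS shaper weight registers: pick the table row whose [low, high]
+ * range contains the rate, then m = (rate / 4) / 2^e with e taken from
+ * that row.
+ */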
+static int
+ipn3ke_tm_shaper_parame_trans(struct rte_tm_shaper_params *profile,
+ struct ipn3ke_tm_shaper_profile *local_profile,
+ const struct ipn3ke_tm_shaper_params_range_type *ref_data)
+{
+ uint32_t i;
+ const struct ipn3ke_tm_shaper_params_range_type *r;
+ uint64_t rate;
+
+ rate = profile->peak.rate;
+ for (i = 0, r = ref_data; i < IPN3KE_TM_SHAPER_RANGE_NUM; i++, r++) {
+ if (rate >= r->low &&
+ rate <= r->high) {
+ local_profile->m = (rate / 4) / r->exp2;
+ local_profile->e = r->exp;
+ local_profile->rate = rate;
+
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static int
+ipn3ke_tm_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id, struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_shaper_profile *sp;
+
+ /* Shaper profile must not exist. */
+ sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
+ if (!sp || sp->valid)
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ /* Profile must not be NULL. */
+ if (profile == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Peak rate: non-zero and within the supported shaper range */
+ if (profile->peak.rate == 0 ||
+ profile->peak.rate > IPN3KE_TM_SHAPER_PEAK_RATE_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Peak size: must be zero */
+ if (profile->peak.size != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Dual-rate shaping is not supported; committed rate must not exceed the supported maximum */
+ if (profile->committed.rate > IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Packet length adjust: must be zero */
+ if (profile->pkt_length_adjust != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (ipn3ke_tm_shaper_parame_trans(profile,
+ sp,
+ ipn3ke_tm_shaper_params_rang)) {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+ } else {
+ sp->valid = 1;
+ rte_memcpy(&sp->params, profile, sizeof(sp->params));
+ }
+
+ tm->h.n_shaper_profiles++;
+
+ return 0;
+}
+
+/* Traffic manager shaper profile delete */
+static int
+ipn3ke_tm_shaper_profile_delete(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id, struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_shaper_profile *sp;
+
+ /* Check existing */
+ sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
+ if (!sp || !sp->valid)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ sp->valid = 0;
+ tm->h.n_shaper_profiles--;
+
+ return 0;
+}
+
+static int
+ipn3ke_tm_tdrop_profile_check(__rte_unused struct rte_eth_dev *dev,
+ uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ enum rte_color color;
+
+ /* TDROP profile ID must not be NONE. */
+ if (tdrop_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Profile must not be NULL. */
+ if (profile == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* TDROP profile must be in byte mode; packet mode is not supported */
+ if (profile->packet_mode != 0)
+ return -rte_tm_error_set(error,
+ ENOTSUP,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(ENOTSUP));
+
+ /* min_th must fit in the th1/th2 hardware fields and max_th must be zero */
+ for (color = RTE_COLOR_GREEN; color <= RTE_COLOR_GREEN; color++) {
+ uint64_t min_th = profile->red_params[color].min_th;
+ uint64_t max_th = profile->red_params[color].max_th;
+
+ if (((min_th >> IPN3KE_TDROP_TH1_SHIFT) >>
+ IPN3KE_TDROP_TH1_SHIFT) ||
+ max_th != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
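+/*
+ * Program a tail-drop profile into the FPGA: th2 goes into the shared
+ * CCB profile MS register and th1 into the per-profile CCB entry; both
+ * are cleared when the profile is invalidated.
+ */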
+static int
+ipn3ke_hw_tm_tdrop_wr(struct ipn3ke_hw *hw,
+ struct ipn3ke_tm_tdrop_profile *tp)
+{
+ if (tp->valid) {
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CCB_PROFILE_MS,
+ 0,
+ tp->th2,
+ IPN3KE_CCB_PROFILE_MS_MASK);
+
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CCB_PROFILE_P,
+ tp->tdrop_profile_id,
+ tp->th1,
+ IPN3KE_CCB_PROFILE_MASK);
+ } else {
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CCB_PROFILE_MS,
+ 0,
+ 0,
+ IPN3KE_CCB_PROFILE_MS_MASK);
+
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CCB_PROFILE_P,
+ tp->tdrop_profile_id,
+ 0,
+ IPN3KE_CCB_PROFILE_MASK);
+ }
+
+ return 0;
+}
+
+/* Traffic manager TDROP profile add */
+static int
+ipn3ke_tm_tdrop_profile_add(struct rte_eth_dev *dev,
+ uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_tdrop_profile *tp;
+ int status;
+ uint64_t min_th;
+ uint32_t th1, th2;
+
+ /* Check input params */
+ status = ipn3ke_tm_tdrop_profile_check(dev,
+ tdrop_profile_id,
+ profile,
+ error);
+ if (status)
+ return status;
+
+ /* Use the preallocated profile slot for this ID */
+ tp = &hw->tdrop_profile[tdrop_profile_id];
+
+ /* Fill in */
+ tp->valid = 1;
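+ /*
+ * Split the green minimum threshold into the low (th1) and
+ * high (th2) hardware threshold fields.
+ */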
+ min_th = profile->red_params[RTE_COLOR_GREEN].min_th;
+ th1 = (uint32_t)(min_th & IPN3KE_TDROP_TH1_MASK);
+ th2 = (uint32_t)((min_th >> IPN3KE_TDROP_TH1_SHIFT) &
+ IPN3KE_TDROP_TH2_MASK);
+ tp->th1 = th1;
+ tp->th2 = th2;
+ rte_memcpy(&tp->params, profile, sizeof(tp->params));
+
+ /* Add to list */
+ tm->h.n_tdrop_profiles++;
+
+ /* Write FPGA */
+ ipn3ke_hw_tm_tdrop_wr(hw, tp);
+
+ return 0;
+}
+
+/* Traffic manager TDROP profile delete */
+static int
+ipn3ke_tm_tdrop_profile_delete(struct rte_eth_dev *dev,
+ uint32_t tdrop_profile_id, struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_tdrop_profile *tp;
+
+ /* Check existing */
+ tp = ipn3ke_hw_tm_tdrop_profile_search(hw, tdrop_profile_id);
+ if (tp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (tp->n_users)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Set free */
+ tp->valid = 0;
+ tm->h.n_tdrop_profiles--;
+
+ /* Write FPGA */
+ ipn3ke_hw_tm_tdrop_wr(hw, tp);
+
+ return 0;
+}
+
+static int
+ipn3ke_tm_node_add_check_parameter(uint32_t tm_id,
+ uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ uint32_t level_of_node_id;
+ uint32_t node_index;
+ uint32_t parent_level_id;
+
+ if (node_id == RTE_TM_NODE_ID_NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* priority: must be 0, 1, 2, 3 */
+ if (priority > IPN3KE_TM_NODE_PRIORITY_HIGHEST)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* weight: must be 1 .. 255 */
+ if (weight > IPN3KE_TM_NODE_WEIGHT_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check node ID and parent node ID */
+ level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
+ if (level_of_node_id != level_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
+ parent_level_id = parent_node_id / IPN3KE_TM_NODE_LEVEL_MOD;
+ switch (level_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ if (node_index != tm_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ if (parent_node_id != RTE_TM_NODE_ID_NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_index >= IPN3KE_TM_VT_NODE_NUM)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ if (parent_level_id != IPN3KE_TM_NODE_LEVEL_PORT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_index >= IPN3KE_TM_COS_NODE_NUM)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ if (parent_level_id != IPN3KE_TM_NODE_LEVEL_VT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* params: must not be NULL */
+ if (params == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS,
+ NULL,
+ rte_strerror(EINVAL));
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+}
+
+static int
+ipn3ke_tm_node_add_check_mount(uint32_t tm_id,
+ uint32_t node_id, uint32_t parent_node_id, uint32_t level_id,
+ struct rte_tm_error *error)
+{
+ uint32_t node_index;
+ uint32_t parent_index;
+ uint32_t parent_index1;
+
+ node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
+ parent_index = parent_node_id % IPN3KE_TM_NODE_LEVEL_MOD;
+ parent_index1 = node_index / IPN3KE_TM_NODE_MOUNT_MAX;
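+ /*
+ * A VT node may only be mounted on the port node of this TM instance,
+ * and a COS node may only be mounted on the VT node determined by its
+ * own index (each VT mounts at most IPN3KE_TM_NODE_MOUNT_MAX COS nodes).
+ */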
+ switch (level_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (parent_index != tm_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (parent_index != parent_index1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+/* Traffic manager node add */
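+/*
+ * Nodes are not written to hardware here: they are put on the per-level
+ * commit lists in the CONFIGURED_ADD state and are only programmed into
+ * the FPGA when the hierarchy is committed.
+ */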
+static int
+ipn3ke_tm_node_add(struct rte_eth_dev *dev,
+ uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node *n, *parent_node;
+ uint32_t node_state, state_mask;
+ int status;
+
+ /* Checks */
+ if (tm->hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ tm_id = tm->tm_id;
+
+ status = ipn3ke_tm_node_add_check_parameter(tm_id,
+ node_id,
+ parent_node_id,
+ priority,
+ weight,
+ level_id,
+ params,
+ error);
+ if (status)
+ return status;
+
+ status = ipn3ke_tm_node_add_check_mount(tm_id,
+ node_id,
+ parent_node_id,
+ level_id,
+ error);
+ if (status)
+ return status;
+
+ /* Shaper profile ID must be either NONE or equal to the node ID */
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
+ params->shaper_profile_id != node_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Look up the node slot for this node ID */
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_IDLE);
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_DEL);
+ n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
+ if (!n)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ node_state = n->node_state;
+
+ /* Check parent node */
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
+ if (parent_node_id != RTE_TM_NODE_ID_NULL) {
+ parent_node = ipn3ke_hw_tm_node_search(hw,
+ tm_id,
+ parent_node_id,
+ state_mask);
+ if (!parent_node)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ } else {
+ parent_node = NULL;
+ }
+
+ switch (level_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
+ n->tm_id = tm_id;
+ tm->h.port_commit_node = n;
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
+ TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
+ if (parent_node)
+ parent_node->n_children++;
+ tm->h.n_vt_nodes++;
+ } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ if (parent_node)
+ parent_node->n_children++;
+ tm->h.n_vt_nodes++;
+ }
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
+ n->parent_node_id = parent_node_id;
+ n->tm_id = tm_id;
+ n->parent_node = parent_node;
+
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
+ TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
+ n, node);
+ if (parent_node)
+ parent_node->n_children++;
+ tm->h.n_cos_nodes++;
+ } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ if (parent_node)
+ parent_node->n_children++;
+ tm->h.n_cos_nodes++;
+ }
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
+ n->parent_node_id = parent_node_id;
+ n->tm_id = tm_id;
+ n->parent_node = parent_node;
+
+ break;
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Fill in */
+ n->priority = priority;
+ n->weight = weight;
+
+ if (n->level == IPN3KE_TM_NODE_LEVEL_COS &&
+ params->leaf.cman == RTE_TM_CMAN_TAIL_DROP)
+ n->tdrop_profile = ipn3ke_hw_tm_tdrop_profile_search(hw,
+ params->leaf.wred.wred_profile_id);
+
+ rte_memcpy(&n->params, params, sizeof(n->params));
+
+ return 0;
+}
+
+static int
+ipn3ke_tm_node_del_check_parameter(uint32_t tm_id,
+ uint32_t node_id, struct rte_tm_error *error)
+{
+ uint32_t level_of_node_id;
+ uint32_t node_index;
+
+ if (node_id == RTE_TM_NODE_ID_NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check node ID and parent node ID */
+ level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
+ node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
+ switch (level_of_node_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ if (node_index != tm_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_index >= IPN3KE_TM_VT_NODE_NUM)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_index >= IPN3KE_TM_COS_NODE_NUM)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+/* Traffic manager node delete */
+static int
+ipn3ke_pmd_tm_node_delete(struct rte_eth_dev *dev,
+ uint32_t node_id, struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_node *n, *parent_node;
+ uint32_t tm_id;
+ int status;
+ uint32_t level_of_node_id;
+ uint32_t node_state;
+ uint32_t state_mask;
+
+ /* Check hierarchy changes are currently allowed */
+ if (tm->hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ tm_id = tm->tm_id;
+
+ status = ipn3ke_tm_node_del_check_parameter(tm_id,
+ node_id,
+ error);
+ if (status)
+ return status;
+
+ /* Check existing */
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
+ n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (n->n_children > 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ node_state = n->node_state;
+
+ level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
+
+ /* Check parent node */
+ if (n->parent_node_id != RTE_TM_NODE_ID_NULL) {
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
+ parent_node = ipn3ke_hw_tm_node_search(hw,
+ tm_id,
+ n->parent_node_id,
+ state_mask);
+ if (!parent_node)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ if (n->parent_node != parent_node)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ } else {
+ parent_node = NULL;
+ }
+
+ switch (level_of_node_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ if (tm->h.port_node != n)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
+ tm->h.port_commit_node = n;
+
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
+ if (parent_node)
+ TAILQ_REMOVE(&parent_node->children_node_list,
+ n, node);
+ TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
+ if (parent_node)
+ parent_node->n_children--;
+ tm->h.n_vt_nodes--;
+ } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ if (parent_node)
+ parent_node->n_children--;
+ tm->h.n_vt_nodes--;
+ }
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
+
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
+ if (parent_node)
+ TAILQ_REMOVE(&parent_node->children_node_list,
+ n, node);
+ TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
+ n, node);
+ if (parent_node)
+ parent_node->n_children--;
+ tm->h.n_cos_nodes--;
+ } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ if (parent_node)
+ parent_node->n_children--;
+ tm->h.n_cos_nodes--;
+ }
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
+
+ break;
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+static int
+ipn3ke_tm_hierarchy_commit_check(struct rte_eth_dev *dev,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node_list *nl;
+ struct ipn3ke_tm_node *n, *parent_node;
+
+ tm_id = tm->tm_id;
+
+ nl = &tm->h.cos_commit_node_list;
+ TAILQ_FOREACH(n, nl, node) {
+ parent_node = n->parent_node;
+ if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
+ n->level != IPN3KE_TM_NODE_LEVEL_COS ||
+ n->tm_id != tm_id ||
+ parent_node == NULL ||
+ (parent_node &&
+ parent_node->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
+ (parent_node &&
+ parent_node->node_state ==
+ IPN3KE_TM_NODE_STATE_IDLE) ||
+ n->shaper_profile.valid == 0) {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ } else if (n->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ if (n->level != IPN3KE_TM_NODE_LEVEL_COS ||
+ n->n_children != 0) {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ } else {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ }
+ }
+
+ nl = &tm->h.vt_commit_node_list;
+ TAILQ_FOREACH(n, nl, node) {
+ parent_node = n->parent_node;
+ if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
+ n->level != IPN3KE_TM_NODE_LEVEL_VT ||
+ n->tm_id != tm_id ||
+ parent_node == NULL ||
+ (parent_node &&
+ parent_node->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
+ (parent_node &&
+ parent_node->node_state ==
+ IPN3KE_TM_NODE_STATE_IDLE) ||
+ n->shaper_profile.valid == 0) {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ } else if (n->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ if (n->level != IPN3KE_TM_NODE_LEVEL_VT ||
+ n->n_children != 0) {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ } else {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ }
+ }
+
+ n = tm->h.port_commit_node;
+ if (n &&
+ (n->parent_node_id != RTE_TM_NODE_ID_NULL ||
+ n->level != IPN3KE_TM_NODE_LEVEL_PORT ||
+ n->tm_id != tm_id ||
+ n->parent_node != NULL ||
+ n->shaper_profile.valid == 0)) {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+static int
+ipn3ke_hw_tm_node_wr(struct ipn3ke_hw *hw,
+ struct ipn3ke_tm_node *n)
+{
+ uint32_t level;
+
+ level = n->level;
+
+ switch (level) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ /**
+ * Configure Type
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_TYPE_L3_X,
+ n->node_index,
+ n->priority,
+ IPN3KE_QOS_TYPE_MASK);
+
+ /**
+ * Configure Sch_wt
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SCH_WT_L3_X,
+ n->node_index,
+ n->weight,
+ IPN3KE_QOS_SCH_WT_MASK);
+
+ /**
+ * Configure Shap_wt
+ */
+ if (n->shaper_profile.valid)
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SHAP_WT_L3_X,
+ n->node_index,
+ ((n->shaper_profile.e << 10) |
+ n->shaper_profile.m),
+ IPN3KE_QOS_SHAP_WT_MASK);
+
+ break;
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ /**
+ * Configure Type
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_TYPE_L2_X,
+ n->node_index,
+ n->priority,
+ IPN3KE_QOS_TYPE_MASK);
+
+ /**
+ * Configure Sch_wt
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SCH_WT_L2_X,
+ n->node_index,
+ n->weight,
+ IPN3KE_QOS_SCH_WT_MASK);
+
+ /**
+ * Configure Shap_wt
+ */
+ if (n->shaper_profile.valid)
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SHAP_WT_L2_X,
+ n->node_index,
+ ((n->shaper_profile.e << 10) |
+ n->shaper_profile.m),
+ IPN3KE_QOS_SHAP_WT_MASK);
+
+ /**
+ * Configure Map
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_MAP_L2_X,
+ n->node_index,
+ n->parent_node->node_index,
+ IPN3KE_QOS_MAP_L2_MASK);
+
+ break;
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ /**
+ * Configure Tail Drop mapping
+ */
+ if (n->tdrop_profile && n->tdrop_profile->valid) {
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CCB_QPROFILE_Q,
+ n->node_index,
+ n->tdrop_profile->tdrop_profile_id,
+ IPN3KE_CCB_QPROFILE_MASK);
+ }
+
+ /**
+ * Configure Type
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_TYPE_L1_X,
+ n->node_index,
+ n->priority,
+ IPN3KE_QOS_TYPE_MASK);
+
+ /**
+ * Configure Sch_wt
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SCH_WT_L1_X,
+ n->node_index,
+ n->weight,
+ IPN3KE_QOS_SCH_WT_MASK);
+
+ /**
+ * Configure Shap_wt
+ */
+ if (n->shaper_profile.valid)
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SHAP_WT_L1_X,
+ n->node_index,
+ ((n->shaper_profile.e << 10) |
+ n->shaper_profile.m),
+ IPN3KE_QOS_SHAP_WT_MASK);
+
+ /**
+ * Configure COS queue to port
+ */
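+ /*
+ * Poll the busy bit (bit 31) of the QM UID config control register
+ * before and after writing the queue-to-port mapping.
+ */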
+ while (IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_QM_UID_CONFIG_CTRL,
+ 0,
+ 0x80000000))
+ ;
+
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QM_UID_CONFIG_DATA,
+ 0,
+ (1 << 8 | n->parent_node->parent_node->node_index),
+ 0x1FF);
+
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QM_UID_CONFIG_CTRL,
+ 0,
+ n->node_index,
+ 0xFFFFF);
+
+ while (IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_QM_UID_CONFIG_CTRL,
+ 0,
+ 0x80000000))
+ ;
+
+ /**
+ * Configure Map
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_MAP_L1_X,
+ n->node_index,
+ n->parent_node->node_index,
+ IPN3KE_QOS_MAP_L1_MASK);
+
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+ipn3ke_tm_hierarchy_hw_commit(struct rte_eth_dev *dev,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_node_list *nl;
+ struct ipn3ke_tm_node *n, *nn, *parent_node;
+
+ n = tm->h.port_commit_node;
+ if (n) {
+ if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ tm->h.port_commit_node = NULL;
+
+ n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
+ } else if (n->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ tm->h.port_commit_node = NULL;
+
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ } else {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ ipn3ke_hw_tm_node_wr(hw, n);
+ }
+
+ nl = &tm->h.vt_commit_node_list;
+ for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
+ nn = TAILQ_NEXT(n, node);
+ if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
+ parent_node = n->parent_node;
+ TAILQ_REMOVE(nl, n, node);
+ TAILQ_INSERT_TAIL(&parent_node->children_node_list,
+ n, node);
+ } else if (n->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ parent_node = n->parent_node;
+ TAILQ_REMOVE(nl, n, node);
+
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->parent_node_id = RTE_TM_NODE_ID_NULL;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ n->parent_node = NULL;
+ } else {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ ipn3ke_hw_tm_node_wr(hw, n);
+ }
+
+ nl = &tm->h.cos_commit_node_list;
+ for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
+ nn = TAILQ_NEXT(n, node);
+ if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
+ parent_node = n->parent_node;
+ TAILQ_REMOVE(nl, n, node);
+ TAILQ_INSERT_TAIL(&parent_node->children_node_list,
+ n, node);
+ } else if (n->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ parent_node = n->parent_node;
+ TAILQ_REMOVE(nl, n, node);
+
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->parent_node_id = RTE_TM_NODE_ID_NULL;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ n->parent_node = NULL;
+
+ if (n->tdrop_profile)
+ n->tdrop_profile->n_users--;
+ } else {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ ipn3ke_hw_tm_node_wr(hw, n);
+ }
+
+ return 0;
+}
+
+static int
+ipn3ke_tm_hierarchy_commit_clear(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_node_list *nl;
+ struct ipn3ke_tm_node *n;
+ struct ipn3ke_tm_node *nn;
+
+ n = tm->h.port_commit_node;
+ if (n) {
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ n->n_children = 0;
+
+ tm->h.port_commit_node = NULL;
+ }
+
+ nl = &tm->h.vt_commit_node_list;
+ for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
+ nn = TAILQ_NEXT(n, node);
+
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->parent_node_id = RTE_TM_NODE_ID_NULL;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ n->parent_node = NULL;
+ n->n_children = 0;
+ tm->h.n_vt_nodes--;
+
+ TAILQ_REMOVE(nl, n, node);
+ }
+
+ nl = &tm->h.cos_commit_node_list;
+ for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
+ nn = TAILQ_NEXT(n, node);
+
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->parent_node_id = RTE_TM_NODE_ID_NULL;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ n->parent_node = NULL;
+ tm->h.n_cos_nodes--;
+
+ TAILQ_REMOVE(nl, n, node);
+ }
+
+ return 0;
+}
+
+static void
+ipn3ke_tm_show(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node_list *vt_nl, *cos_nl;
+ struct ipn3ke_tm_node *port_n, *vt_n, *cos_n;
+ const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
+ "CfgAdd",
+ "CfgDel",
+ "Committed"};
+
+ tm_id = tm->tm_id;
+
+ IPN3KE_AFU_PMD_DEBUG("***HQoS Tree(%d)***\n", tm_id);
+
+ port_n = tm->h.port_node;
+ IPN3KE_AFU_PMD_DEBUG("Port: (%d|%s)\n", port_n->node_index,
+ str_state[port_n->node_state]);
+
+ vt_nl = &tm->h.port_node->children_node_list;
+ TAILQ_FOREACH(vt_n, vt_nl, node) {
+ cos_nl = &vt_n->children_node_list;
+ IPN3KE_AFU_PMD_DEBUG(" VT%d: ", vt_n->node_index);
+ TAILQ_FOREACH(cos_n, cos_nl, node) {
+ if (cos_n->parent_node_id !=
+ (vt_n->node_index + IPN3KE_TM_NODE_LEVEL_MOD))
+ IPN3KE_AFU_PMD_ERR("(%d|%s), ",
+ cos_n->node_index,
+ str_state[cos_n->node_state]);
+ }
+ IPN3KE_AFU_PMD_DEBUG("\n");
+ }
+}
+
+static void
+ipn3ke_tm_show_commmit(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node_list *nl;
+ struct ipn3ke_tm_node *n;
+ const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
+ "CfgAdd",
+ "CfgDel",
+ "Committed"};
+
+ tm_id = tm->tm_id;
+
+ IPN3KE_AFU_PMD_DEBUG("***Commit Tree(%d)***\n", tm_id);
+ n = tm->h.port_commit_node;
+ IPN3KE_AFU_PMD_DEBUG("Port: ");
+ if (n)
+ IPN3KE_AFU_PMD_DEBUG("(%d|%s)",
+ n->node_index,
+ str_state[n->node_state]);
+ IPN3KE_AFU_PMD_DEBUG("\n");
+
+ nl = &tm->h.vt_commit_node_list;
+ IPN3KE_AFU_PMD_DEBUG("VT : ");
+ TAILQ_FOREACH(n, nl, node) {
+ IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
+ n->node_index,
+ str_state[n->node_state]);
+ }
+ IPN3KE_AFU_PMD_DEBUG("\n");
+
+ nl = &tm->h.cos_commit_node_list;
+ IPN3KE_AFU_PMD_DEBUG("COS : ");
+ TAILQ_FOREACH(n, nl, node) {
+ IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
+ n->node_index,
+ str_state[n->node_state]);
+ }
+ IPN3KE_AFU_PMD_DEBUG("\n");
+}
+
+/* Traffic manager hierarchy commit */
+static int
+ipn3ke_tm_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail, struct rte_tm_error *error)
+{
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ int status;
+
+ /* Checks */
+ if (tm->hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ ipn3ke_tm_show_commmit(dev);
+
+ status = ipn3ke_tm_hierarchy_commit_check(dev, error);
+ if (status) {
+ if (clear_on_fail)
+ ipn3ke_tm_hierarchy_commit_clear(dev);
+ return status;
+ }
+
+ ipn3ke_tm_hierarchy_hw_commit(dev, error);
+ ipn3ke_tm_show(dev);
+
+ return 0;
+}
+
+const struct rte_tm_ops ipn3ke_tm_ops = {
+ .node_type_get = ipn3ke_pmd_tm_node_type_get,
+ .capabilities_get = ipn3ke_tm_capabilities_get,
+ .level_capabilities_get = ipn3ke_tm_level_capabilities_get,
+ .node_capabilities_get = ipn3ke_tm_node_capabilities_get,
+
+ .wred_profile_add = ipn3ke_tm_tdrop_profile_add,
+ .wred_profile_delete = ipn3ke_tm_tdrop_profile_delete,
+ .shared_wred_context_add_update = NULL,
+ .shared_wred_context_delete = NULL,
+
+ .shaper_profile_add = ipn3ke_tm_shaper_profile_add,
+ .shaper_profile_delete = ipn3ke_tm_shaper_profile_delete,
+ .shared_shaper_add_update = NULL,
+ .shared_shaper_delete = NULL,
+
+ .node_add = ipn3ke_tm_node_add,
+ .node_delete = ipn3ke_pmd_tm_node_delete,
+ .node_suspend = NULL,
+ .node_resume = NULL,
+ .hierarchy_commit = ipn3ke_tm_hierarchy_commit,
+
+ .node_parent_update = NULL,
+ .node_shaper_update = NULL,
+ .node_shared_shaper_update = NULL,
+ .node_stats_update = NULL,
+ .node_wfq_weight_mode_update = NULL,
+ .node_cman_update = NULL,
+ .node_wred_context_update = NULL,
+ .node_shared_wred_context_update = NULL,
+
+ .node_stats_read = NULL,
+};
+
+int
+ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev,
+ void *arg)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+ struct rte_eth_dev *i40e_pf_eth;
+ const struct rte_tm_ops *ops;
+
+ if (!arg)
+ return -EINVAL;
+
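+ /*
+ * Use the IPN3KE TM ops when the accelerated TM is enabled, otherwise
+ * fall back to the TM ops exposed by the attached i40e PF ethdev.
+ */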
+ if (hw->acc_tm) {
+ *(const void **)arg = &ipn3ke_tm_ops;
+ } else if (rpst->i40e_pf_eth) {
+ i40e_pf_eth = rpst->i40e_pf_eth;
+ if (i40e_pf_eth->dev_ops->tm_ops_get == NULL ||
+ i40e_pf_eth->dev_ops->tm_ops_get(i40e_pf_eth,
+ &ops) != 0 ||
+ ops == NULL) {
+ return -EINVAL;
+ }
+ *(const void **)arg = ops;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/net/ipn3ke/meson.build b/drivers/net/ipn3ke/meson.build
index ec77390..3a95efc 100644
--- a/drivers/net/ipn3ke/meson.build
+++ b/drivers/net/ipn3ke/meson.build
@@ -11,5 +11,6 @@
allow_experimental_apis = true
sources += files('ipn3ke_ethdev.c',
- 'ipn3ke_representor.c')
+ 'ipn3ke_representor.c',
+ 'ipn3ke_tm.c')
deps += ['bus_ifpga', 'sched']
--
1.8.3.1
next prev parent reply other threads:[~2019-04-15 5:07 UTC|newest]
Thread overview: 343+ messages / expand[flat|nested] mbox.gz Atom feed top