* [dpdk-dev] [PATCH 1/2] net/softnic: enable flow classification function
From: Jasvinder Singh @ 2017-11-30 20:08 UTC (permalink / raw)
To: dev; +Cc: cristian.dumitrescu, ferruh.yigit
Enables flow classification on the softnic rx path so that
subsequent stages of the packet processing pipeline, such as
metering and policing, can be implemented.
Example: Create "soft" port for "hard" port "0000:02:00.1",
enable the Flow Classification (FC) feature with default
settings:
--vdev 'net_softnic0,hard_name=0000:02:00.1,soft_fc=on'
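As a minimal sketch of how an application might consume this (a
hypothetical polling loop; the queue id, burst size and function name
are illustrative, not part of this patch), rte_pmd_softnic_run() now
executes both the registered rx ops (flow classification) and tx ops
(traffic management):
    #include <rte_ethdev.h>
    #include <rte_eth_softnic.h>
    /* Hypothetical polling loop for the soft port (illustrative only) */
    static void
    softnic_poll(uint16_t soft_port_id)
    {
        struct rte_mbuf *pkts[32];
        uint16_t n;
        for ( ; ; ) {
            /* Run the rx ops (FC) and tx ops (TM) set at dev_start */
            rte_pmd_softnic_run(soft_port_id);
            /* Read back the classified packets from the soft RX queue */
            n = rte_eth_rx_burst(soft_port_id, 0, pkts, 32);
            (void)n; /* application processing goes here */
        }
    }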
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
---
drivers/net/softnic/Makefile | 2 +-
drivers/net/softnic/rte_eth_softnic.c | 389 +++++++++++++++++++++++-
drivers/net/softnic/rte_eth_softnic.h | 16 +
drivers/net/softnic/rte_eth_softnic_fc.c | 377 +++++++++++++++++++++++
drivers/net/softnic/rte_eth_softnic_internals.h | 162 +++++++++-
mk/rte.app.mk | 2 +
6 files changed, 937 insertions(+), 11 deletions(-)
create mode 100644 drivers/net/softnic/rte_eth_softnic_fc.c
diff --git a/drivers/net/softnic/Makefile b/drivers/net/softnic/Makefile
index 09ed62e..4f7f6c9 100644
--- a/drivers/net/softnic/Makefile
+++ b/drivers/net/softnic/Makefile
@@ -51,7 +51,7 @@ LIBABIVER := 1
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tm.c
-
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_fc.c
#
# Export include files
#
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index 3e47c2f..a5779d3 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -61,6 +61,12 @@
#define PMD_PARAM_SOFT_TM_ENQ_BSZ "soft_tm_enq_bsz"
#define PMD_PARAM_SOFT_TM_DEQ_BSZ "soft_tm_deq_bsz"
+#define PMD_PARAM_SOFT_FC "soft_fc"
+#define PMD_PARAM_SOFT_FC_NB_RULES "soft_fc_nb_rules"
+#define PMD_PARAM_SOFT_FC_FLOW_KEY_SIZE "soft_fc_flow_key_size"
+#define PMD_PARAM_SOFT_FC_FLOW_KEY_OFFSET "soft_fc_flow_key_offset"
+#define PMD_PARAM_SOFT_FC_FLOW_KEY_MASK "soft_fc_flow_key_mask"
+
#define PMD_PARAM_HARD_NAME "hard_name"
#define PMD_PARAM_HARD_TX_QUEUE_ID "hard_tx_queue_id"
@@ -74,6 +80,11 @@ static const char *pmd_valid_args[] = {
PMD_PARAM_SOFT_TM_QSIZE3,
PMD_PARAM_SOFT_TM_ENQ_BSZ,
PMD_PARAM_SOFT_TM_DEQ_BSZ,
+ PMD_PARAM_SOFT_FC,
+ PMD_PARAM_SOFT_FC_NB_RULES,
+ PMD_PARAM_SOFT_FC_FLOW_KEY_SIZE,
+ PMD_PARAM_SOFT_FC_FLOW_KEY_OFFSET,
+ PMD_PARAM_SOFT_FC_FLOW_KEY_MASK,
PMD_PARAM_HARD_NAME,
PMD_PARAM_HARD_TX_QUEUE_ID,
NULL
@@ -96,6 +107,15 @@ static const struct rte_eth_dev_info pmd_dev_info = {
},
};
+static __rte_always_inline int
+run_tm(struct rte_eth_dev *dev);
+
+static __rte_always_inline int
+run_default(struct rte_eth_dev *dev);
+
+static __rte_always_inline int
+run_fc(struct rte_eth_dev *dev);
+
static void
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
@@ -121,14 +141,28 @@ pmd_dev_configure(struct rte_eth_dev *dev)
static int
pmd_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t rx_queue_id,
- uint16_t nb_rx_desc __rte_unused,
+ uint16_t nb_rx_desc,
unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mb_pool __rte_unused)
{
struct pmd_internals *p = dev->data->dev_private;
- if (p->params.soft.intrusive == 0) {
+ if (p->params.soft.intrusive == 0 && fc_enabled(dev)) {
+ uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_rxq") + 4;
+ char name[size];
+ struct rte_ring *r;
+
+ snprintf(name, sizeof(name), "%s_rxq%04x",
+ dev->data->name, rx_queue_id);
+ r = rte_ring_create(name, nb_rx_desc, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (r == NULL)
+ return -1;
+
+ dev->data->rx_queues[rx_queue_id] = r;
+
+ } else if (p->params.soft.intrusive == 0) {
struct pmd_rx_queue *rxq;
rxq = rte_zmalloc_socket(p->params.soft.name,
@@ -177,12 +211,34 @@ static int
pmd_dev_start(struct rte_eth_dev *dev)
{
struct pmd_internals *p = dev->data->dev_private;
+ uint32_t tx_ops_flag = p->params.tx_ops_data.ops_flag;
+ uint32_t rx_ops_flag = p->params.rx_ops_data.ops_flag;
+ uint32_t nb_tx_ops = 0, nb_rx_ops = 0;
+
+ memset(&p->params.tx_ops_data.ops, 0,
+ MAX_NB_OPS * sizeof(*p->params.tx_ops_data.ops));
+ memset(&p->params.rx_ops_data.ops, 0,
+ MAX_NB_OPS * sizeof(*p->params.rx_ops_data.ops));
if (tm_used(dev)) {
int status = tm_start(p);
if (status)
return status;
+
+ /* Set run-time tx ops data */
+ if (!(tx_ops_flag & PMD_FEATURE_TM)) {
+ p->params.tx_ops_data.ops_flag |= PMD_FEATURE_TM;
+ p->params.tx_ops_data.ops[nb_tx_ops++] = run_tm;
+ }
+ }
+
+ if (fc_enabled(dev)) {
+ /* Set run-time rx ops data */
+ if (!(rx_ops_flag & PMD_FEATURE_FC)) {
+ p->params.rx_ops_data.ops_flag |= PMD_FEATURE_FC;
+ p->params.rx_ops_data.ops[nb_rx_ops++] = run_fc;
+ }
}
dev->data->dev_link.link_status = ETH_LINK_UP;
@@ -194,6 +250,12 @@ pmd_dev_start(struct rte_eth_dev *dev)
dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
}
+ /* Set default if no tx ops */
+ if (nb_tx_ops == 0)
+ p->params.tx_ops_data.ops[nb_tx_ops++] = run_default;
+
+ p->params.rx_ops_data.nb_ops = nb_rx_ops;
+ p->params.tx_ops_data.nb_ops = nb_tx_ops;
return 0;
}
@@ -234,6 +296,18 @@ pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
return 0;
}
+static int
+pmd_flow_ops_get(struct rte_eth_dev *dev,
+ __rte_unused enum rte_filter_type filter_type,
+ __rte_unused enum rte_filter_op filter_op,
+ void *arg)
+{
+ *(const struct rte_flow_ops **)arg =
+ (fc_enabled(dev)) ? &pmd_flow_ops : NULL;
+
+ return 0;
+}
+
static const struct eth_dev_ops pmd_ops = {
.dev_configure = pmd_dev_configure,
.dev_start = pmd_dev_start,
@@ -244,10 +318,11 @@ static const struct eth_dev_ops pmd_ops = {
.rx_queue_setup = pmd_rx_queue_setup,
.tx_queue_setup = pmd_tx_queue_setup,
.tm_ops_get = pmd_tm_ops_get,
+ .filter_ctrl = pmd_flow_ops_get,
};
static uint16_t
-pmd_rx_pkt_burst(void *rxq,
+pmd_rx_pkt_burst_default(void *rxq,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
@@ -260,6 +335,19 @@ pmd_rx_pkt_burst(void *rxq,
}
static uint16_t
+pmd_rx_pkt_burst(void *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_ring *rx_queue = rxq;
+
+ return (uint16_t)rte_ring_sc_dequeue_burst(rx_queue,
+ (void **)rx_pkts,
+ nb_pkts,
+ NULL);
+}
+
+static uint16_t
pmd_tx_pkt_burst(void *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -406,16 +494,138 @@ run_tm(struct rte_eth_dev *dev)
return 0;
}
-int
-rte_pmd_softnic_run(uint16_t port_id)
+static __rte_always_inline int
+fc_flow_classifier_run(struct pmd_internals *p,
+ struct rte_mbuf **pkts,
+ const uint16_t nb_pkts,
+ uint32_t pkt_offset)
+{
+ struct rte_flow_action_queue *queue;
+ struct fc_table_entry *entry;
+ uint64_t pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
+ uint32_t nb_tables = p->soft.fc.nb_tables;
+ uint32_t table_mask = p->soft.fc.table_mask;
+ uint64_t action_mask;
+ uint32_t *ptr, i, j;
+ int ret = -EINVAL;
+
+ for (i = 0; i < nb_tables; i++) {
+ if (table_mask & (1LU << i)) {
+ struct fc_table *table = &p->soft.fc.tables[i];
+ uint64_t lookup_hit_mask;
+
+ ret = table->ops.f_lookup(table->h_table,
+ pkts, pkts_mask, &lookup_hit_mask,
+ (void **)p->soft.fc.entries);
+ if (ret)
+ return ret;
+
+ if (lookup_hit_mask) {
+ for (j = 0; j < nb_pkts; j++) {
+ uint64_t pkt_mask = 1LLU << j;
+
+ if ((lookup_hit_mask & pkt_mask) == 0)
+ continue;
+
+ /* Meta-data */
+ enum rte_flow_action_type act_type =
+ RTE_FLOW_ACTION_TYPE_QUEUE;
+ entry = p->soft.fc.entries[j];
+ action_mask = entry->action_mask;
+
+ if (action_mask & (1LLU << act_type)) {
+ queue = &entry->act.queue;
+ ptr = RTE_PKT_METADATA_PTR(
+ pkts[j], pkt_offset);
+ *ptr = queue->index;
+ }
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static __rte_always_inline int
+run_fc(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* Persistent Context: Read Only (update not required) */
+ struct rte_eth_dev *hard_dev = DEV_HARD(p);
+ struct rte_mbuf **pkts = p->soft.fc.pkts;
+ uint16_t nb_rx_queues = hard_dev->data->nb_rx_queues;
+
+ /* Not part of the persistent context */
+ uint16_t nb_pkts, i;
+
+ /* Hard device RXQ read, Soft Device RXQ write */
+ for (i = 0; i < nb_rx_queues; i++) {
+ struct rte_ring *rxq = dev->data->rx_queues[i];
+
+ /* Hard device RXQ read */
+ nb_pkts = rte_eth_rx_burst(p->hard.port_id,
+ i, pkts, DEFAULT_BURST_SIZE);
+
+ /* Classify the packets received in this burst */
+ if (nb_pkts) {
+ fc_flow_classifier_run(p, pkts, nb_pkts,
+ FC_ACTION_METADATA_OFFSET);
+
+ /* Soft device RXQ write */
+ rte_ring_enqueue_burst(rxq, (void **)pkts,
+ nb_pkts, NULL);
+ }
+ }
+ return 0;
+}
+
+static int
+rte_pmd_softnic_rx_run(uint16_t port_id)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct pmd_internals *p = dev->data->dev_private;
+
+ uint32_t nb_rx_ops = p->params.rx_ops_data.nb_ops;
+ uint32_t i;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+#endif
+
+ for (i = 0; i < nb_rx_ops; i++)
+ p->params.rx_ops_data.ops[i](dev);
+
+ return 0;
+}
+
+static int
+rte_pmd_softnic_tx_run(uint16_t port_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct pmd_internals *p = dev->data->dev_private;
+
+ uint32_t nb_tx_ops = p->params.tx_ops_data.nb_ops;
+ uint32_t i;
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
#endif
- return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
+ for (i = 0; i < nb_tx_ops; i++)
+ p->params.tx_ops_data.ops[i](dev);
+
+ return 0;
+}
+
+int
+rte_pmd_softnic_run(uint16_t port_id)
+{
+ rte_pmd_softnic_rx_run(port_id);
+ rte_pmd_softnic_tx_run(port_id);
+
+ return 0;
}
static struct ether_addr eth_addr = { .addr_bytes = {0} };
@@ -502,6 +712,20 @@ pmd_init(struct pmd_params *params, int numa_node)
}
}
+ /* Flow Classification */
+ if (params->soft.flags & PMD_FEATURE_FC) {
+ status = fc_init(p, params, numa_node);
+ if (status) {
+ if (p->params.soft.flags & PMD_FEATURE_TM)
+ tm_free(p);
+
+ default_free(p);
+ free(p->params.hard.name);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
return p;
}
@@ -511,6 +735,9 @@ pmd_free(struct pmd_internals *p)
if (p->params.soft.flags & PMD_FEATURE_TM)
tm_free(p);
+ if (p->params.soft.flags & PMD_FEATURE_FC)
+ fc_free(p);
+
default_free(p);
free(p->params.hard.name);
@@ -539,9 +766,14 @@ pmd_ethdev_register(struct rte_vdev_device *vdev,
return -ENOMEM;
/* dev */
- soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
- NULL : /* set up later */
- pmd_rx_pkt_burst;
+ if (params->soft.intrusive == 0 &&
+ (params->soft.flags & PMD_FEATURE_FC))
+ soft_dev->rx_pkt_burst = pmd_rx_pkt_burst;
+ else if (params->soft.intrusive == 0)
+ soft_dev->rx_pkt_burst = pmd_rx_pkt_burst_default;
+ else
+ soft_dev->rx_pkt_burst = NULL; /* Set up later */
+
soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
soft_dev->tx_pkt_prepare = NULL;
soft_dev->dev_ops = &pmd_ops;
@@ -586,10 +818,63 @@ get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
return 0;
}
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
+static int
+get_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if ((src == NULL) ||
+ (dst == NULL) ||
+ (size == NULL) ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
+
static int
pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
{
struct rte_kvargs *kvlist;
+ char key_mask_str[SOFTNIC_SOFT_FC_FLOW_RULE_KEY_MAX_SIZE * 2 + 1];
int i, ret;
kvlist = rte_kvargs_parse(params, pmd_valid_args);
@@ -606,6 +891,11 @@ pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
+ p->soft.fc.nb_rules = SOFTNIC_SOFT_FC_NB_RULES;
+ p->soft.fc.key_size = SOFTNIC_SOFT_FC_FLOW_RULE_KEY_SIZE;
+ p->soft.fc.key_offset = SOFTNIC_SOFT_FC_FLOW_RULE_KEY_OFFSET;
+ snprintf(key_mask_str, sizeof(key_mask_str),
+ "%s", "00FF0000FFFFFFFFFFFFFFFFFFFFFFFF");
+ get_hex_string(key_mask_str, p->soft.fc.key_mask,
+ &p->soft.fc.key_size);
p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;
/* SOFT: TM (optional) */
@@ -718,6 +1008,75 @@ pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
p->soft.flags |= PMD_FEATURE_TM;
}
+ /* SOFT: FC (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_FC) == 1) {
+ char *s;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_FC,
+ &get_string, &s);
+ if (ret < 0)
+ goto out_free;
+
+ if (strcmp(s, "on") == 0)
+ p->soft.flags |= PMD_FEATURE_FC;
+ else if (strcmp(s, "off") == 0)
+ p->soft.flags &= ~PMD_FEATURE_FC;
+ else
+ ret = -EINVAL;
+
+ free(s);
+ if (ret)
+ goto out_free;
+ }
+
+ /* SOFT: FC number of flow rules (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_FC_NB_RULES) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_FC_NB_RULES,
+ &get_uint32, &p->soft.fc.nb_rules);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_FC;
+ }
+
+ /* SOFT: FC flow rule key size (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_FC_FLOW_KEY_SIZE) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ PMD_PARAM_SOFT_FC_FLOW_KEY_SIZE,
+ &get_uint32, &p->soft.fc.key_size);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_FC;
+ }
+
+ /* SOFT: FC flow rule key offset (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_FC_FLOW_KEY_OFFSET) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ PMD_PARAM_SOFT_FC_FLOW_KEY_OFFSET,
+ &get_uint32, &p->soft.fc.key_offset);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_FC;
+ }
+
+ /* SOFT: FC flow rule key mask (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_FC_FLOW_KEY_MASK) == 1) {
+ char *mask_str = NULL;
+
+ ret = rte_kvargs_process(kvlist,
+ PMD_PARAM_SOFT_FC_FLOW_KEY_MASK,
+ &get_string, &mask_str);
+ if (ret < 0)
+ goto out_free;
+
+ snprintf(key_mask_str, sizeof(key_mask_str), "%s", mask_str);
+ free(mask_str);
+
+ ret = get_hex_string(key_mask_str, p->soft.fc.key_mask,
+ &p->soft.fc.key_size);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_FC;
+ }
+
/* HARD: name (mandatory) */
if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
@@ -786,6 +1145,13 @@ pmd_probe(struct rte_vdev_device *vdev)
return status;
}
+ if (p.soft.flags & PMD_FEATURE_FC) {
+ status = fc_params_check(&p);
+
+ if (status)
+ return status;
+ }
+
/* Allocate and initialize soft ethdev private data */
dev_private = pmd_init(&p, numa_node);
if (dev_private == NULL)
@@ -847,5 +1213,10 @@ RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
+ PMD_PARAM_SOFT_FC "=on|off "
+ PMD_PARAM_SOFT_FC_NB_RULES "=<int> "
+ PMD_PARAM_SOFT_FC_FLOW_KEY_SIZE "=<int> "
+ PMD_PARAM_SOFT_FC_FLOW_KEY_OFFSET "=<int> "
+ PMD_PARAM_SOFT_FC_FLOW_KEY_MASK "=<string> "
PMD_PARAM_HARD_NAME "=<string> "
PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
diff --git a/drivers/net/softnic/rte_eth_softnic.h b/drivers/net/softnic/rte_eth_softnic.h
index b49e582..1dd17b1 100644
--- a/drivers/net/softnic/rte_eth_softnic.h
+++ b/drivers/net/softnic/rte_eth_softnic.h
@@ -56,6 +56,22 @@ extern "C" {
#define SOFTNIC_SOFT_TM_DEQ_BSZ 24
#endif
+#ifndef SOFTNIC_SOFT_FC_NB_RULES
+#define SOFTNIC_SOFT_FC_NB_RULES 4096
+#endif
+
+#ifndef SOFTNIC_SOFT_FC_FLOW_RULE_KEY_SIZE
+#define SOFTNIC_SOFT_FC_FLOW_RULE_KEY_SIZE 16
+#endif
+
+#ifndef SOFTNIC_SOFT_FC_FLOW_RULE_KEY_OFFSET
+#define SOFTNIC_SOFT_FC_FLOW_RULE_KEY_OFFSET 278
+#endif
+
+#ifndef SOFTNIC_SOFT_FC_FLOW_RULE_KEY_MAX_SIZE
+#define SOFTNIC_SOFT_FC_FLOW_RULE_KEY_MAX_SIZE 64
+#endif
+
#ifndef SOFTNIC_HARD_TX_QUEUE_ID
#define SOFTNIC_HARD_TX_QUEUE_ID 0
#endif
diff --git a/drivers/net/softnic/rte_eth_softnic_fc.c b/drivers/net/softnic/rte_eth_softnic_fc.c
new file mode 100644
index 0000000..83f7fd3
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_fc.c
@@ -0,0 +1,377 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+
+#include "rte_eth_softnic_internals.h"
+
+static inline uint64_t
+hash_xor_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = seed ^ (k[0] & m[0]);
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+
+ xor0 ^= k[2] & m[2];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+
+ xor0 ^= xor1;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+
+ xor0 ^= xor1;
+
+ xor0 ^= k[4] & m[4];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+
+ xor0 ^= xor1;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+
+ xor0 ^= xor1;
+ xor2 ^= k[6] & m[6];
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2, xor3;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+ xor3 = (k[6] & m[6]) ^ (k[7] & m[7]);
+
+ xor0 ^= xor1;
+ xor2 ^= xor3;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+rte_table_hash_op_hash hash_func[] = {
+ hash_xor_key8,
+ hash_xor_key16,
+ hash_xor_key24,
+ hash_xor_key32,
+ hash_xor_key40,
+ hash_xor_key48,
+ hash_xor_key56,
+ hash_xor_key64
+};
+
+int
+fc_params_check(struct pmd_params *params)
+{
+ /* nb_max_rules */
+ if (params->soft.fc.nb_rules == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+fc_table_check_params(struct pmd_internals *p,
+ struct fc_table_params *params)
+{
+ /* Number of tables */
+ if (p->soft.fc.nb_tables == FC_TABLE_HASH_KEY_MAX)
+ return -EINVAL;
+
+ /* Parameter */
+ if (params == NULL)
+ return -EINVAL;
+
+ /* Ops */
+ if (params->ops == NULL)
+ return -EINVAL;
+
+ if (params->ops->f_create == NULL)
+ return -EINVAL;
+
+ if (params->ops->f_lookup == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+rte_flow_classify_table_create(struct pmd_internals *p,
+ struct fc_table *table,
+ struct fc_table_params *params,
+ int numa_node)
+{
+ void *h_table;
+ uint32_t entry_size;
+ int ret;
+
+ /* Check input arguments */
+ ret = fc_table_check_params(p, params);
+ if (ret != 0)
+ return ret;
+
+ /* calculate table entry size */
+ entry_size = sizeof(struct fc_table_entry);
+
+ /* Create the table */
+ h_table = params->ops->f_create(params->arg_create, numa_node,
+ entry_size);
+ if (h_table == NULL)
+ return -EINVAL;
+
+ table->type = params->type;
+ p->soft.fc.table_mask |= 1LU << table->type;
+ p->soft.fc.nb_tables++;
+
+ /* Save input parameters */
+ memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
+
+ /* Initialize table internal data structure */
+ table->entry_size = entry_size;
+ table->h_table = h_table;
+
+ return 0;
+}
+
+static int
+fc_tables_create(struct pmd_internals *p,
+ struct pmd_params *params, int numa_node)
+{
+ int ret;
+ struct fc_table *table;
+
+ /* Flow Classification: hash table parameters */
+ struct rte_table_hash_params table_hash_params = {
+ .name = params->soft.name,
+ .key_size = params->soft.fc.key_size,
+ .key_offset = params->soft.fc.key_offset,
+ .key_mask = params->soft.fc.key_mask,
+ .n_keys = params->soft.fc.nb_rules,
+ .n_buckets = rte_align32pow2(params->soft.fc.nb_rules / 4),
+ .f_hash = hash_func[(params->soft.fc.key_size / 8) - 1],
+ .seed = 0,
+ };
+
+ struct fc_table_params table_params = {
+ .ops = NULL,
+ .arg_create = &table_hash_params,
+ .type = FC_TABLE_HASH_KEY_MAX,
+ };
+
+ switch (params->soft.fc.key_size) {
+ case 8:
+ table_params.ops = &rte_table_hash_key8_ext_ops;
+ table_params.type = FC_TABLE_HASH_KEY8;
+ break;
+
+ case 16:
+ table_params.ops = &rte_table_hash_key16_ext_ops;
+ table_params.type = FC_TABLE_HASH_KEY16;
+ break;
+
+ default:
+ table_params.ops = &rte_table_hash_ext_ops;
+ table_params.type = FC_TABLE_HASH_KEY_EXT;
+ }
+
+ table = &p->soft.fc.tables[table_params.type];
+
+ /** Create hash table */
+ ret = rte_flow_classify_table_create(p, table,
+ &table_params,
+ numa_node);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+fc_flow_init(struct pmd_internals *p)
+{
+ /* Initialize classifier flow list */
+ TAILQ_INIT(&p->soft.fc.flow_list);
+}
+
+static void
+fc_flow_uninit(struct pmd_internals *p)
+{
+ /** Remove all rules from the list */
+ for ( ; ; ) {
+ struct rte_flow *flow;
+
+ flow = TAILQ_FIRST(&p->soft.fc.flow_list);
+ if (flow == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.fc.flow_list, flow, node);
+ free(flow);
+ }
+}
+
+int
+fc_init(struct pmd_internals *p,
+ struct pmd_params *params,
+ int numa_node)
+{
+ int ret;
+
+ memset(&p->soft.fc, 0, sizeof(p->soft.fc));
+
+ p->soft.fc.pkts = rte_zmalloc_socket(params->soft.name,
+ 2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
+ 0,
+ numa_node);
+ if (p->soft.fc.pkts == NULL)
+ return -ENOMEM;
+
+ /** Create all tables/filters for classifiers */
+ ret = fc_tables_create(p, params, numa_node);
+ if (ret) {
+ rte_free(p->soft.fc.pkts);
+ return ret;
+ }
+
+ fc_flow_init(p);
+
+ return 0;
+}
+
+void
+fc_free(struct pmd_internals *p)
+{
+ fc_flow_uninit(p);
+ rte_free(p->soft.fc.pkts);
+}
+
+const struct rte_flow_ops pmd_flow_ops = {
+ .validate = NULL,
+ .create = NULL,
+ .destroy = NULL,
+ .flush = NULL,
+};
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index 1f75806..9e59d62 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -39,7 +39,10 @@
#include <rte_mbuf.h>
#include <rte_sched.h>
#include <rte_ethdev.h>
+#include <rte_table.h>
+#include <rte_table_hash.h>
#include <rte_tm_driver.h>
+#include <rte_flow_driver.h>
#include "rte_eth_softnic.h"
@@ -48,13 +51,30 @@
*/
enum pmd_feature {
- PMD_FEATURE_TM = 1, /**< Traffic Management (TM) */
+ /** Traffic Management (TM) */
+ PMD_FEATURE_TM = 1 << 0,
+
+ /** Flow Classification (FC) */
+ PMD_FEATURE_FC = 1 << 1,
};
#ifndef INTRUSIVE
#define INTRUSIVE 0
#endif
+#ifndef MAX_NB_OPS
+#define MAX_NB_OPS 32
+#endif
+
+#ifndef FC_FLOW_KEY_MAX_SIZE
+#define FC_FLOW_KEY_MAX_SIZE 64
+#endif
+
+/**
+ * Softnic operations
+ */
+typedef int (*soft_ops)(struct rte_eth_dev *dev);
+
struct pmd_params {
/** Parameters for the soft device (to be created) */
struct {
@@ -77,6 +97,14 @@ struct pmd_params {
uint32_t enq_bsz; /**< Enqueue burst size */
uint32_t deq_bsz; /**< Dequeue burst size */
} tm;
+
+ /** Flow Classification (FC) */
+ struct {
+ uint32_t nb_rules; /**< Max number of flow rules */
+ uint32_t key_size; /**< Flow rule key size */
+ uint8_t key_mask[FC_FLOW_KEY_MAX_SIZE]; /**< Key mask */
+ uint32_t key_offset; /**< Key offset */
+ } fc;
} soft;
/** Parameters for the hard device (existing) */
@@ -84,6 +112,19 @@ struct pmd_params {
char *name; /**< Name */
uint16_t tx_queue_id; /**< TX queue ID */
} hard;
+
+ /** Parameters for the softnic operations (rx & tx) */
+ struct {
+ uint32_t nb_ops;
+ uint32_t ops_flag;
+ soft_ops ops[MAX_NB_OPS];
+ } rx_ops_data;
+
+ struct {
+ uint32_t nb_ops;
+ uint32_t ops_flag;
+ soft_ops ops[MAX_NB_OPS];
+ } tx_ops_data;
};
/**
@@ -225,6 +266,102 @@ struct tm_internals {
};
/**
+ * Flow Classification (FC) Internals
+ */
+#ifndef FC_ACTION_METADATA_OFFSET
+#define FC_ACTION_METADATA_OFFSET 128
+#endif
+
+#ifndef FC_FLOW_PACKET_BURST_SIZE_MAX
+#define FC_FLOW_PACKET_BURST_SIZE_MAX 64
+#endif
+
+#define RTE_PKT_METADATA_PTR(pkt, offset) \
+ (&((uint32_t *)(pkt))[offset])
+
+/**
+ * FC flow (PMD-specific definition of struct rte_flow)
+ */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ struct rte_flow_attr attr; /**< Attributes. */
+ struct rte_flow_item *pattern; /**< Items. */
+ struct rte_flow_action *actions; /**< Actions. */
+};
+
+TAILQ_HEAD(fc_flow_list, rte_flow);
+
+/* FC tables type */
+enum fc_table_hash_type {
+ FC_TABLE_HASH_KEY8 = 0,
+ FC_TABLE_HASH_KEY16,
+ FC_TABLE_HASH_KEY_EXT,
+ FC_TABLE_HASH_KEY_MAX,
+};
+
+/**
+ * FC Flow classification table parameters
+ */
+struct fc_table_params {
+ /** Table operations (specific to each table type) */
+ struct rte_table_ops *ops;
+ /** Opaque param to be passed to the table create operation when
+ * invoked
+ */
+ void *arg_create;
+
+ /** Classifier table type */
+ enum fc_table_hash_type type;
+};
+
+/**
+ * Header format for a table entry. For any given FC table, all
+ * entries have the same size and format, and each entry starts
+ * with this header, which contains the action mask and the
+ * associated action meta-data.
+ */
+struct fc_table_entry {
+ /** Flow action mask */
+ uint64_t action_mask;
+
+ struct action {
+ /** Assign packets to a given queue index */
+ struct rte_flow_action_queue queue;
+
+ /** Traffic metering and policing (MTR) */
+ struct rte_flow_action_meter meter;
+
+ } act;
+};
+
+/** FC Flow table */
+struct fc_table {
+ /* Input parameters */
+ struct rte_table_ops ops;
+ uint32_t entry_size;
+ enum fc_table_hash_type type;
+
+ /* Handle to the low-level table object */
+ void *h_table;
+};
+
+struct fc_internals {
+ /** Flow rule list */
+ struct fc_flow_list flow_list;
+ uint32_t n_flow_rules;
+
+ /** Flow classification table */
+ struct fc_table tables[FC_TABLE_HASH_KEY_MAX];
+ uint32_t nb_tables;
+ uint32_t table_mask;
+
+ /** Run-time */
+ struct rte_mbuf **pkts;
+ struct fc_table_entry *entries[FC_FLOW_PACKET_BURST_SIZE_MAX];
+};
+
+/**
* PMD Internals
*/
struct pmd_internals {
@@ -235,6 +372,7 @@ struct pmd_internals {
struct {
struct default_internals def; /**< Default */
struct tm_internals tm; /**< Traffic Management */
+ struct fc_internals fc; /**< Flow Classification */
} soft;
/** Hard device */
@@ -256,6 +394,11 @@ struct pmd_rx_queue {
*/
extern const struct rte_tm_ops pmd_tm_ops;
+/**
+ * Flow API operation
+ */
+extern const struct rte_flow_ops pmd_flow_ops;
+
int
tm_params_check(struct pmd_params *params, uint32_t hard_rate);
@@ -288,4 +431,21 @@ tm_used(struct rte_eth_dev *dev)
p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
}
+int
+fc_params_check(struct pmd_params *params);
+
+int
+fc_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);
+
+void
+fc_free(struct pmd_internals *p);
+
+static inline int
+fc_enabled(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ return (p->params.soft.flags & PMD_FEATURE_FC);
+}
+
#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 6a6a745..72d684c 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -60,7 +60,9 @@ _LDLIBS-y += -L$(RTE_SDK_BIN)/lib
#
_LDLIBS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += -lrte_flow_classify
_LDLIBS-$(CONFIG_RTE_LIBRTE_PIPELINE) += -lrte_pipeline
+_LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE) += --whole-archive
_LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE) += -lrte_table
+_LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE) += --no-whole-archive
_LDLIBS-$(CONFIG_RTE_LIBRTE_PORT) += -lrte_port
_LDLIBS-$(CONFIG_RTE_LIBRTE_PDUMP) += -lrte_pdump
--
2.9.3
* [dpdk-dev] [PATCH 2/2] net/softnic: add flow classification ops
From: Jasvinder Singh @ 2017-11-30 20:08 UTC (permalink / raw)
To: dev; +Cc: cristian.dumitrescu, ferruh.yigit
To manage and configure flow rules, implement the operations to
validate, create, destroy and flush flow rules.
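As a minimal sketch (a hypothetical rule; the match items and queue
index are illustrative, and this patch only stores the pattern and
actions without programming the classifier table yet), the standard
rte_flow API can now be exercised against the soft port:
    #include <rte_flow.h>
    /* Hypothetical example: steer ingress Ethernet packets to queue 0 */
    static struct rte_flow *
    add_example_rule(uint16_t port_id, struct rte_flow_error *error)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        if (rte_flow_validate(port_id, &attr, pattern, actions, error))
            return NULL;
        return rte_flow_create(port_id, &attr, pattern, actions, error);
    }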
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
---
drivers/net/softnic/rte_eth_softnic_fc.c | 311 ++++++++++++++++++++++++++++++-
1 file changed, 307 insertions(+), 4 deletions(-)
diff --git a/drivers/net/softnic/rte_eth_softnic_fc.c b/drivers/net/softnic/rte_eth_softnic_fc.c
index 83f7fd3..e41eb57 100644
--- a/drivers/net/softnic/rte_eth_softnic_fc.c
+++ b/drivers/net/softnic/rte_eth_softnic_fc.c
@@ -330,6 +330,9 @@ fc_flow_uninit(struct pmd_internals *p)
break;
TAILQ_REMOVE(&p->soft.fc.flow_list, flow, node);
+
+ free(flow->pattern);
+ free(flow->actions);
free(flow);
}
}
@@ -369,9 +372,309 @@ fc_free(struct pmd_internals *p)
rte_free(p->soft.fc.pkts);
}
+static struct rte_flow *
+fc_flow_search(struct pmd_internals *p,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[])
+{
+ struct fc_flow_list *fl = &p->soft.fc.flow_list;
+ struct rte_flow *f;
+ uint32_t n_item, n_act, n_match_params;
+
+ TAILQ_FOREACH(f, fl, node) {
+ n_item = 0;
+ n_act = 0;
+ n_match_params = 0;
+
+ /* Check: Flow attr */
+ if (memcmp(&f->attr,
+ (const void *)attr, sizeof(*attr)))
+ continue;
+ else
+ n_match_params += 1;
+
+ /* Check: Flow pattern */
+ while (pattern[n_item].type != RTE_FLOW_ITEM_TYPE_END) {
+ if (pattern[n_item].type != f->pattern[n_item].type)
+ n_match_params = 0;
+
+ n_item++;
+ }
+
+ if (n_match_params)
+ n_match_params += 1;
+ else
+ continue;
+
+ /* Check: Flow action */
+ while (actions[n_act].type != RTE_FLOW_ACTION_TYPE_END) {
+ if (actions[n_act].type != f->actions[n_act].type)
+ n_match_params = 0;
+
+ n_act++;
+ }
+
+ if (n_match_params)
+ n_match_params += 1;
+ else
+ continue;
+
+ if (n_match_params == 3)
+ return f;
+ }
+ return NULL;
+}
+
+static int
+rule_add_check(struct pmd_internals *p,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+ uint32_t n_flow_rules;
+
+ /** Check: Number of rules */
+ n_flow_rules = p->soft.fc.n_flow_rules;
+ if (n_flow_rules >= p->params.soft.fc.nb_rules) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Number of Flow rule exceeds.");
+ return -EINVAL;
+ }
+
+ flow = fc_flow_search(p, attr, pattern, actions);
+ if (flow != NULL) {
+ rte_flow_error_set(error,
+ EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow rule exists.");
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+/* Flow rule validate */
+static int
+pmd_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL,
+ "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL,
+ "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL,
+ "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* Add validate function */
+
+ return 0;
+}
+
+/* Create flow rule */
+static struct rte_flow *
+pmd_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct fc_flow_list *fl = &p->soft.fc.flow_list;
+ struct rte_flow *f;
+ uint32_t n_item = 0, n_act = 0;
+ int status;
+
+ /** Check: Attributes */
+ if (!attr) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL,
+ "NULL attribute.");
+ return NULL;
+ }
+
+ /** Check: Pattern */
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL,
+ "NULL pattern.");
+ return NULL;
+ }
+
+ /** Check: Actions */
+ if (!actions) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL,
+ "NULL action.");
+ return NULL;
+ }
+
+ /** Check: Rule */
+ status = rule_add_check(p, attr, pattern, actions, error);
+ if (status)
+ return NULL;
+
+ /** Add rule to the table */
+
+ /* Memory allocation */
+ f = calloc(1, sizeof(struct rte_flow));
+ if (f == NULL) {
+ rte_flow_error_set(error,
+ ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to create flow.");
+ return NULL;
+ }
+
+ /** Add attr to list */
+ memcpy((void *)&f->attr, (const void *)attr, sizeof(*attr));
+
+ /** Allocate for pattern */
+ while ((pattern + n_item)->type != RTE_FLOW_ITEM_TYPE_END)
+ n_item++;
+
+ n_item++;
+
+ f->pattern = calloc(n_item, sizeof(struct rte_flow_item));
+ if (f->pattern == NULL) {
+ rte_flow_error_set(error,
+ ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to create flow.");
+ free(f);
+ return NULL;
+ }
+
+ /** Add pattern */
+ memcpy(f->pattern, pattern, n_item * sizeof(*f->pattern));
+
+ /** Allocate for action */
+ while ((actions + n_act)->type != RTE_FLOW_ACTION_TYPE_END)
+ n_act++;
+
+ n_act++;
+
+ f->actions = calloc(n_act, sizeof(struct rte_flow_action));
+ if (f->actions == NULL) {
+ rte_flow_error_set(error,
+ ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to create flow.");
+ free(f->pattern);
+ free(f);
+ return NULL;
+ }
+
+ /** Add actions */
+ memcpy(f->actions, actions, n_act * sizeof(*f->actions));
+
+ TAILQ_INSERT_TAIL(fl, f, node);
+ p->soft.fc.n_flow_rules++;
+
+ return f;
+}
+
+/* Destroy flow rule */
+static int
+pmd_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct fc_flow_list *fl = &p->soft.fc.flow_list;
+ struct rte_flow *f;
+
+ /** Check: flow */
+ if (!flow) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "NULL flow.");
+ return -EINVAL;
+ }
+ /* Check existing */
+ f = fc_flow_search(p, &flow->attr, flow->pattern, flow->actions);
+ if (f == NULL) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow not exist");
+ return -EINVAL;
+ }
+
+ /** Remove from Classifier table */
+
+ /** Remove from list */
+ TAILQ_REMOVE(fl, f, node);
+ p->soft.fc.n_flow_rules--;
+ free(f->pattern);
+ free(f->actions);
+ free(f);
+
+ return 0;
+}
+
+/* Destroy flow rules */
+static int
+pmd_flow_flush(struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ for ( ; ; ) {
+ struct rte_flow *f;
+
+ f = TAILQ_FIRST(&p->soft.fc.flow_list);
+ if (f == NULL)
+ break;
+
+ /** Remove from Classifier table */
+
+ /** Remove from list */
+ TAILQ_REMOVE(&p->soft.fc.flow_list, f, node);
+ free(f->pattern);
+ free(f->actions);
+ free(f);
+ }
+
+ p->soft.fc.n_flow_rules = 0;
+
+ return 0;
+}
+
const struct rte_flow_ops pmd_flow_ops = {
- .validate = NULL,
- .create = NULL,
- .destroy = NULL,
- .flush = NULL,
+ .validate = pmd_flow_validate,
+ .create = pmd_flow_create,
+ .destroy = pmd_flow_destroy,
+ .flush = pmd_flow_flush,
};
--
2.9.3