DPDK patches and discussions
* [dpdk-dev] [PATCH v2 1/5] pipeline: improve table entry helpers
@ 2021-02-16 20:21 Cristian Dumitrescu
  2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 2/5] pipeline: improve table entry parsing Cristian Dumitrescu
                   ` (4 more replies)
  0 siblings, 5 replies; 11+ messages in thread
From: Cristian Dumitrescu @ 2021-02-16 20:21 UTC (permalink / raw)
  To: dev

Improve the internal table entry helper routines for key comparison,
entry duplication and checks.
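
Part of the change replaces the per-match-type key comparison routines
with a single masked comparison. A minimal standalone sketch of the
idea (illustrative names, not the exact library code; key_mask0 stands
for the table-level mask and a NULL per-entry mask means "match all
bits"):

	#include <stdint.h>

	static int
	keycmp_masked(const uint8_t *k0, const uint8_t *m0,
		      const uint8_t *k1, const uint8_t *m1,
		      const uint8_t *key_mask0, uint32_t key_size)
	{
		uint32_t i;

		for (i = 0; i < key_size; i++) {
			uint8_t t = key_mask0 ? key_mask0[i] : 0xFF;
			uint8_t a = m0 ? m0[i] : 0xFF;
			uint8_t b = m1 ? m1[i] : 0xFF;

			/* The two entries must select the same key bits ... */
			if ((a & t) != (b & t))
				return 1; /* Not equal. */

			/* ... and the selected key bits must be identical. */
			if ((k0[i] & a & t) != (k1[i] & b & t))
				return 1; /* Not equal. */
		}

		return 0; /* Equal. */
	}

For example, keys 0x12 and 0x1F with per-entry mask 0xF0 compare equal
under a full table mask, while the same keys with full per-entry masks
do not.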

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
---
 lib/librte_pipeline/rte_swx_ctl.c | 120 ++++++++++++++++--------------
 1 file changed, 65 insertions(+), 55 deletions(-)

diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c
index ee6df4544..62ce3086d 100644
--- a/lib/librte_pipeline/rte_swx_ctl.c
+++ b/lib/librte_pipeline/rte_swx_ctl.c
@@ -234,6 +234,26 @@ table_entry_alloc(struct table *table)
 	return NULL;
 }
 
+static int
+table_entry_key_check_em(struct table *table, struct rte_swx_table_entry *entry)
+{
+	uint8_t *key_mask0 = table->params.key_mask0;
+	uint32_t key_size = table->params.key_size, i;
+
+	if (!entry->key_mask)
+		return 0;
+
+	for (i = 0; i < key_size; i++) {
+		uint8_t km0 = key_mask0[i];
+		uint8_t km = entry->key_mask[i];
+
+		if ((km & km0) != km0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int
 table_entry_check(struct rte_swx_ctl_pipeline *ctl,
 		  uint32_t table_id,
@@ -242,6 +262,7 @@ table_entry_check(struct rte_swx_ctl_pipeline *ctl,
 		  int data_check)
 {
 	struct table *table = &ctl->tables[table_id];
+	int status;
 
 	CHECK(entry, EINVAL);
 
@@ -266,7 +287,9 @@ table_entry_check(struct rte_swx_ctl_pipeline *ctl,
 				break;
 
 			case RTE_SWX_TABLE_MATCH_EXACT:
-				CHECK(!entry->key_mask, EINVAL);
+				status = table_entry_key_check_em(table, entry);
+				if (status)
+					return status;
 				break;
 
 			default:
@@ -327,10 +350,7 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl,
 		new_entry->key_signature = entry->key_signature;
 
 		/* key_mask. */
-		if (table->params.match_type != RTE_SWX_TABLE_MATCH_EXACT) {
-			if (!entry->key_mask)
-				goto error;
-
+		if (entry->key_mask) {
 			new_entry->key_mask = malloc(table->params.key_size);
 			if (!new_entry->key_mask)
 				goto error;
@@ -357,18 +377,24 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl,
 
 		/* action_data. */
 		a = &ctl->actions[entry->action_id];
-		if (a->data_size) {
-			if (!entry->action_data)
-				goto error;
+		if (a->data_size && !entry->action_data)
+			goto error;
 
-			new_entry->action_data = malloc(a->data_size);
-			if (!new_entry->action_data)
-				goto error;
+		/* The table layer provisions a constant action data size per
+		 * entry, which should be the largest data size for all the
+		 * actions enabled for the current table, and attempts to copy
+		 * this many bytes each time a table entry is added, even if the
+		 * specific action requires less data or even no data at all,
+		 * hence we always have to allocate the max.
+		 */
+		new_entry->action_data = calloc(1, table->params.action_data_size);
+		if (!new_entry->action_data)
+			goto error;
 
+		if (a->data_size)
 			memcpy(new_entry->action_data,
 			       entry->action_data,
 			       a->data_size);
-		}
 	}
 
 	return new_entry;
@@ -378,58 +404,36 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl,
 	return NULL;
 }
 
-static int
-entry_keycmp_em(struct rte_swx_table_entry *e0,
-		struct rte_swx_table_entry *e1,
-		uint32_t key_size)
-{
-	if (e0->key_signature != e1->key_signature)
-		return 1; /* Not equal. */
-
-	if (memcmp(e0->key, e1->key, key_size))
-		return 1; /* Not equal. */
-
-	return 0; /* Equal */
-}
-
-static int
-entry_keycmp_wm(struct rte_swx_table_entry *e0 __rte_unused,
-		struct rte_swx_table_entry *e1 __rte_unused,
-		uint32_t key_size __rte_unused)
-{
-	/* TBD */
-
-	return 1; /* Not equal */
-}
-
-static int
-entry_keycmp_lpm(struct rte_swx_table_entry *e0 __rte_unused,
-		 struct rte_swx_table_entry *e1 __rte_unused,
-		 uint32_t key_size __rte_unused)
-{
-	/* TBD */
-
-	return 1; /* Not equal */
-}
-
 static int
 table_entry_keycmp(struct table *table,
 		   struct rte_swx_table_entry *e0,
 		   struct rte_swx_table_entry *e1)
 {
-	switch (table->params.match_type) {
-	case RTE_SWX_TABLE_MATCH_EXACT:
-		return entry_keycmp_em(e0, e1, table->params.key_size);
+	uint32_t key_size = table->params.key_size;
+	uint32_t i;
+
+	for (i = 0; i < key_size; i++) {
+		uint8_t *key_mask0 = table->params.key_mask0;
+		uint8_t km0, km[2], k[2];
+
+		km0 = key_mask0 ? key_mask0[i] : 0xFF;
+
+		km[0] = e0->key_mask ? e0->key_mask[i] : 0xFF;
+		km[1] = e1->key_mask ? e1->key_mask[i] : 0xFF;
 
-	case RTE_SWX_TABLE_MATCH_WILDCARD:
-		return entry_keycmp_wm(e0, e1, table->params.key_size);
+		k[0] = e0->key[i];
+		k[1] = e1->key[i];
 
-	case RTE_SWX_TABLE_MATCH_LPM:
-		return entry_keycmp_lpm(e0, e1, table->params.key_size);
+		/* Mask comparison. */
+		if ((km[0] & km0) != (km[1] & km0))
+			return 1; /* Not equal. */
 
-	default:
-		return 1; /* Not equal. */
+		/* Value comparison. */
+		if ((k[0] & km[0] & km0) != (k[1] & km[1] & km0))
+			return 1; /* Not equal. */
 	}
+
+	return 0; /* Equal. */
 }
 
 static struct rte_swx_table_entry *
@@ -893,6 +897,9 @@ rte_swx_ctl_pipeline_table_entry_add(struct rte_swx_ctl_pipeline *ctl,
 	CHECK(table, EINVAL);
 	table_id = table - ctl->tables;
 
+	CHECK(entry, EINVAL);
+	CHECK(!table_entry_check(ctl, table_id, entry, 1, 1), EINVAL);
+
 	new_entry = table_entry_duplicate(ctl, table_id, entry, 1, 1);
 	CHECK(new_entry, ENOMEM);
 
@@ -1095,6 +1102,9 @@ rte_swx_ctl_pipeline_table_default_entry_add(struct rte_swx_ctl_pipeline *ctl,
 	table_id = table - ctl->tables;
 	CHECK(!table->info.default_action_is_const, EINVAL);
 
+	CHECK(entry, EINVAL);
+	CHECK(!table_entry_check(ctl, table_id, entry, 0, 1), EINVAL);
+
 	new_entry = table_entry_duplicate(ctl, table_id, entry, 0, 1);
 	CHECK(new_entry, ENOMEM);
 
-- 
2.17.1



* [dpdk-dev] [PATCH v2 2/5] pipeline: improve table entry parsing
  2021-02-16 20:21 [dpdk-dev] [PATCH v2 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
@ 2021-02-16 20:21 ` Cristian Dumitrescu
  2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 3/5] pipeline: support non-incremental table updates Cristian Dumitrescu
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 11+ messages in thread
From: Cristian Dumitrescu @ 2021-02-16 20:21 UTC (permalink / raw)
  To: dev; +Cc: Venkata Suresh Kumar P, Churchill Khangar

Improve the table entry parsing: better code structure, parsing of the
key field masks, and support for comments and empty lines in the table
entry files.
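
As an example of the accepted input, a table entry file handled by the
updated parser may now look like the lines below (the key layout and
the action names are illustrative; a match field value takes an
optional "/mask" suffix, lines starting with '#', ';' or "//" are
treated as comments, and blank lines are skipped):

	# IPv4 destination based forwarding.
	match 0xC0A80100/0xFFFFFF00 action fwd port 0x0001
	; Overlapping, shorter prefix sent to a different port.
	match 0xC0A80000/0xFFFF0000 action fwd port 0x0002

	// Blank lines, like the one above, are ignored.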

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Signed-off-by: Venkata Suresh Kumar P <venkata.suresh.kumar.p@intel.com>
Signed-off-by: Churchill Khangar <churchill.khangar@intel.com>
---
 examples/pipeline/cli.c           |  21 +++-
 lib/librte_pipeline/rte_swx_ctl.c | 172 ++++++++++++++++++++----------
 lib/librte_pipeline/rte_swx_ctl.h |   7 +-
 3 files changed, 141 insertions(+), 59 deletions(-)

diff --git a/examples/pipeline/cli.c b/examples/pipeline/cli.c
index e97e12060..30c2dd34d 100644
--- a/examples/pipeline/cli.c
+++ b/examples/pipeline/cli.c
@@ -881,14 +881,19 @@ cmd_pipeline_table_update(char **tokens,
 	if (file_add)
 		for (line_id = 1; ; line_id++) {
 			struct rte_swx_table_entry *entry;
+			int is_blank_or_comment;
 
 			if (fgets(line, 2048, file_add) == NULL)
 				break;
 
 			entry = rte_swx_ctl_pipeline_table_entry_read(p->ctl,
 				table_name,
-				line);
+				line,
+				&is_blank_or_comment);
 			if (!entry) {
+				if (is_blank_or_comment)
+					continue;
+
 				snprintf(out, out_size, MSG_FILE_ERR,
 					file_name_add, line_id);
 				goto error;
@@ -911,14 +916,19 @@ cmd_pipeline_table_update(char **tokens,
 	if (file_delete)
 		for (line_id = 1; ; line_id++) {
 			struct rte_swx_table_entry *entry;
+			int is_blank_or_comment;
 
 			if (fgets(line, 2048, file_delete) == NULL)
 				break;
 
 			entry = rte_swx_ctl_pipeline_table_entry_read(p->ctl,
 				table_name,
-				line);
+				line,
+				&is_blank_or_comment);
 			if (!entry) {
+				if (is_blank_or_comment)
+					continue;
+
 				snprintf(out, out_size, MSG_FILE_ERR,
 					file_name_delete, line_id);
 				goto error;
@@ -940,14 +950,19 @@ cmd_pipeline_table_update(char **tokens,
 	if (file_default)
 		for (line_id = 1; ; line_id++) {
 			struct rte_swx_table_entry *entry;
+			int is_blank_or_comment;
 
 			if (fgets(line, 2048, file_default) == NULL)
 				break;
 
 			entry = rte_swx_ctl_pipeline_table_entry_read(p->ctl,
 				table_name,
-				line);
+				line,
+				&is_blank_or_comment);
 			if (!entry) {
+				if (is_blank_or_comment)
+					continue;
+
 				snprintf(out, out_size, MSG_FILE_ERR,
 					file_name_default, line_id);
 				goto error;
diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c
index 62ce3086d..ce3344fdc 100644
--- a/lib/librte_pipeline/rte_swx_ctl.c
+++ b/lib/librte_pipeline/rte_swx_ctl.c
@@ -1339,19 +1339,32 @@ rte_swx_ctl_pipeline_abort(struct rte_swx_ctl_pipeline *ctl)
 		table_abort(ctl, i);
 }
 
+static int
+token_is_comment(const char *token)
+{
+	if ((token[0] == '#') ||
+	    (token[0] == ';') ||
+	    ((token[0] == '/') && (token[1] == '/')))
+		return 1; /* TRUE. */
+
+	return 0; /* FALSE. */
+}
+
 #define RTE_SWX_CTL_ENTRY_TOKENS_MAX 256
 
 struct rte_swx_table_entry *
 rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 				      const char *table_name,
-				      const char *string)
+				      const char *string,
+				      int *is_blank_or_comment)
 {
-	char *tokens[RTE_SWX_CTL_ENTRY_TOKENS_MAX];
+	char *token_array[RTE_SWX_CTL_ENTRY_TOKENS_MAX], **tokens;
 	struct table *table;
 	struct action *action;
 	struct rte_swx_table_entry *entry = NULL;
 	char *s0 = NULL, *s;
 	uint32_t n_tokens = 0, arg_offset = 0, i;
+	int blank_or_comment = 0;
 
 	/* Check input arguments. */
 	if (!ctl)
@@ -1381,37 +1394,66 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 		char *token;
 
 		token = strtok_r(s, " \f\n\r\t\v", &s);
-		if (!token)
+		if (!token || token_is_comment(token))
 			break;
 
 		if (n_tokens >= RTE_SWX_CTL_ENTRY_TOKENS_MAX)
 			goto error;
 
-		tokens[n_tokens] = token;
+		token_array[n_tokens] = token;
 		n_tokens++;
 	}
 
-	if ((n_tokens < 3 + table->info.n_match_fields) ||
-	    strcmp(tokens[0], "match") ||
-	    strcmp(tokens[1 + table->info.n_match_fields], "action"))
-		goto error;
-
-	action = action_find(ctl, tokens[2 + table->info.n_match_fields]);
-	if (!action)
+	if (!n_tokens) {
+		blank_or_comment = 1;
 		goto error;
+	}
 
-	if (n_tokens != 3 + table->info.n_match_fields +
-	    action->info.n_args * 2)
-		goto error;
+	tokens = token_array;
 
 	/*
 	 * Match.
 	 */
+	if (n_tokens && strcmp(tokens[0], "match"))
+		goto action;
+
+	if (n_tokens < 1 + table->info.n_match_fields)
+		goto error;
+
 	for (i = 0; i < table->info.n_match_fields; i++) {
 		struct rte_swx_ctl_table_match_field_info *mf = &table->mf[i];
-		char *mf_val = tokens[1 + i];
-		uint64_t val;
+		char *mf_val = tokens[1 + i], *mf_mask = NULL;
+		uint64_t val, mask = UINT64_MAX;
+		uint32_t offset = (mf->offset - table->mf[0].offset) / 8;
+
+		/*
+		 * Mask.
+		 */
+		mf_mask = strchr(mf_val, '/');
+		if (mf_mask) {
+			*mf_mask = 0;
+			mf_mask++;
+
+			/* Parse. */
+			mask = strtoull(mf_mask, &mf_mask, 0);
+			if (mf_mask[0])
+				goto error;
+
+			/* Endianness conversion. */
+			if (mf->is_header)
+				mask = field_hton(mask, mf->n_bits);
+		}
 
+			/* Copy to entry. */
+			if (entry->key_mask)
+				memcpy(&entry->key_mask[offset],
+				       (uint8_t *)&mask,
+				       mf->n_bits / 8);
+
+		/*
+		 * Value.
+		 */
+		/* Parse. */
 		val = strtoull(mf_val, &mf_val, 0);
 		if (mf_val[0])
 			goto error;
@@ -1420,17 +1462,32 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 		if (mf->is_header)
 			val = field_hton(val, mf->n_bits);
 
-		/* Copy key and key_mask to entry. */
-		memcpy(&entry->key[(mf->offset - table->mf[0].offset) / 8],
+		/* Copy to entry. */
+		memcpy(&entry->key[offset],
 		       (uint8_t *)&val,
 		       mf->n_bits / 8);
-
-		/* TBD Set entry->key_mask for wildcard and LPM tables. */
 	}
 
+	tokens += 1 + table->info.n_match_fields;
+	n_tokens -= 1 + table->info.n_match_fields;
+
 	/*
 	 * Action.
 	 */
+action:
+	if (n_tokens && strcmp(tokens[0], "action"))
+		goto other;
+
+	if (n_tokens < 2)
+		goto error;
+
+	action = action_find(ctl, tokens[1]);
+	if (!action)
+		goto error;
+
+	if (n_tokens < 2 + action->info.n_args * 2)
+		goto error;
+
 	/* action_id. */
 	entry->action_id = action - ctl->actions;
 
@@ -1441,8 +1498,8 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 		uint64_t val;
 		int is_nbo = 0;
 
-		arg_name = tokens[3 + table->info.n_match_fields + i * 2];
-		arg_val = tokens[3 + table->info.n_match_fields + i * 2 + 1];
+		arg_name = tokens[2 + i * 2];
+		arg_val = tokens[2 + i * 2 + 1];
 
 		if (strcmp(arg_name, arg->name) ||
 		    (strlen(arg_val) < 4) ||
@@ -1473,15 +1530,50 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 		arg_offset += arg->n_bits / 8;
 	}
 
+	tokens += 2 + action->info.n_args * 2;
+	n_tokens -= 2 + action->info.n_args * 2;
+
+other:
+	if (n_tokens)
+		goto error;
+
 	free(s0);
 	return entry;
 
 error:
 	table_entry_free(entry);
 	free(s0);
+	if (is_blank_or_comment)
+		*is_blank_or_comment = blank_or_comment;
 	return NULL;
 }
 
+static void
+table_entry_printf(FILE *f,
+		   struct rte_swx_ctl_pipeline *ctl,
+		   struct table *table,
+		   struct rte_swx_table_entry *entry)
+{
+	struct action *action = &ctl->actions[entry->action_id];
+	uint32_t i;
+
+	fprintf(f, "match ");
+	for (i = 0; i < table->params.key_size; i++)
+		fprintf(f, "%02x", entry->key[i]);
+
+	if (entry->key_mask) {
+		fprintf(f, "/");
+		for (i = 0; i < table->params.key_size; i++)
+			fprintf(f, "%02x", entry->key_mask[i]);
+	}
+
+	fprintf(f, " action %s ", action->info.name);
+	for (i = 0; i < action->data_size; i++)
+		fprintf(f, "%02x", entry->action_data[i]);
+
+	fprintf(f, "\n");
+}
+
 int
 rte_swx_ctl_pipeline_table_fprintf(FILE *f,
 				   struct rte_swx_ctl_pipeline *ctl,
@@ -1512,47 +1604,17 @@ rte_swx_ctl_pipeline_table_fprintf(FILE *f,
 
 	/* Table entries. */
 	TAILQ_FOREACH(entry, &table->entries, node) {
-		struct action *action = &ctl->actions[entry->action_id];
-
-		fprintf(f, "match ");
-		for (i = 0; i < table->params.key_size; i++)
-			fprintf(f, "%02x", entry->key[i]);
-
-		fprintf(f, " action %s ", action->info.name);
-		for (i = 0; i < action->data_size; i++)
-			fprintf(f, "%02x", entry->action_data[i]);
-
-		fprintf(f, "\n");
+		table_entry_printf(f, ctl, table, entry);
 		n_entries++;
 	}
 
 	TAILQ_FOREACH(entry, &table->pending_modify0, node) {
-		struct action *action = &ctl->actions[entry->action_id];
-
-		fprintf(f, "match ");
-		for (i = 0; i < table->params.key_size; i++)
-			fprintf(f, "%02x", entry->key[i]);
-
-		fprintf(f, " action %s ", action->info.name);
-		for (i = 0; i < action->data_size; i++)
-			fprintf(f, "%02x", entry->action_data[i]);
-
-		fprintf(f, "\n");
+		table_entry_printf(f, ctl, table, entry);
 		n_entries++;
 	}
 
 	TAILQ_FOREACH(entry, &table->pending_delete, node) {
-		struct action *action = &ctl->actions[entry->action_id];
-
-		fprintf(f, "match ");
-		for (i = 0; i < table->params.key_size; i++)
-			fprintf(f, "%02x", entry->key[i]);
-
-		fprintf(f, " action %s ", action->info.name);
-		for (i = 0; i < action->data_size; i++)
-			fprintf(f, "%02x", entry->action_data[i]);
-
-		fprintf(f, "\n");
+		table_entry_printf(f, ctl, table, entry);
 		n_entries++;
 	}
 
diff --git a/lib/librte_pipeline/rte_swx_ctl.h b/lib/librte_pipeline/rte_swx_ctl.h
index 32815b69e..530671db1 100644
--- a/lib/librte_pipeline/rte_swx_ctl.h
+++ b/lib/librte_pipeline/rte_swx_ctl.h
@@ -521,6 +521,10 @@ rte_swx_ctl_pipeline_abort(struct rte_swx_ctl_pipeline *ctl);
  *   Table name.
  * @param[in] string
  *   String containing the table entry.
+ * @param[out] is_blank_or_comment
+ *   On error, this argument provides an indication of whether *string* contains
+ *   an invalid table entry (set to zero) or a blank or comment line that should
+ *   typically be ignored (set to a non-zero value).
  * @return
  *   0 on success or the following error codes otherwise:
  *   -EINVAL: Invalid argument.
@@ -529,7 +533,8 @@ __rte_experimental
 struct rte_swx_table_entry *
 rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 				      const char *table_name,
-				      const char *string);
+				      const char *string,
+				      int *is_blank_or_comment);
 
 /**
  * Pipeline table print to file
-- 
2.17.1



* [dpdk-dev] [PATCH v2 3/5] pipeline: support non-incremental table updates
  2021-02-16 20:21 [dpdk-dev] [PATCH v2 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
  2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 2/5] pipeline: improve table entry parsing Cristian Dumitrescu
@ 2021-02-16 20:21 ` Cristian Dumitrescu
  2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 4/5] table: add table entry priority Cristian Dumitrescu
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 11+ messages in thread
From: Cristian Dumitrescu @ 2021-02-16 20:21 UTC (permalink / raw)
  To: dev

Some table types (e.g. exact match/hash) allow for incremental table
updates, while others (e.g. wildcard match/ACL) do not. The former case
is already supported; the latter is enabled by this patch.
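
For a table type that does not provide the add() operation, the commit
path sketched below is used instead of the incremental one (simplified
pseudo-code of the new table_rollfwd0() branch; error handling and the
post-swap cleanup are omitted):

	/* Merge the stable entries with the pending additions and
	 * modifications into a single list ...
	 */
	TAILQ_INIT(&list);
	table_entry_list_duplicate(ctl, table_id, &list, &table->entries);
	table_entry_list_duplicate(ctl, table_id, &list, &table->pending_add);
	table_entry_list_duplicate(ctl, table_id, &list, &table->pending_modify1);

	/* ... and build a brand new table object from it; the pipeline
	 * swaps it in on commit, after which the old object is freed.
	 */
	ts_next->obj = table->ops.create(&table->params, &list,
					 table->info.args, ctl->numa_node);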

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
---
 lib/librte_pipeline/rte_swx_ctl.c | 320 ++++++++++++++++++++++++------
 1 file changed, 261 insertions(+), 59 deletions(-)

diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c
index ce3344fdc..3e8e283c3 100644
--- a/lib/librte_pipeline/rte_swx_ctl.c
+++ b/lib/librte_pipeline/rte_swx_ctl.c
@@ -42,11 +42,38 @@ struct table {
 	struct rte_swx_table_ops ops;
 	struct rte_swx_table_params params;
 
+	/* Set of "stable" keys: these keys are currently part of the table;
+	 * these keys will be preserved with no action data changes after the
+	 * next commit.
+	 */
 	struct rte_swx_table_entry_list entries;
+
+	/* Set of new keys: these keys are currently NOT part of the table;
+	 * these keys will be added to the table on the next commit, if
+	 * the commit operation is successful.
+	 */
 	struct rte_swx_table_entry_list pending_add;
+
+	/* Set of keys to be modified: these keys are currently part of the
+	 * table; these keys are still going to be part of the table after the
+	 * next commit, but their action data will be modified if the commit
+	 * operation is successful. The modify0 list contains the keys with the
+	 * current action data, the modify1 list contains the keys with the
+	 * modified action data.
+	 */
 	struct rte_swx_table_entry_list pending_modify0;
 	struct rte_swx_table_entry_list pending_modify1;
+
+	/* Set of keys to be deleted: these keys are currently part of the
+	 * table; these keys are to be deleted from the table on the next
+	 * commit, if the commit operation is successful.
+	 */
 	struct rte_swx_table_entry_list pending_delete;
+
+	/* The pending default action: this is NOT the current default action;
+	 * this will be the new default action after the next commit, if the
+	 * next commit operation is successful.
+	 */
 	struct rte_swx_table_entry *pending_default;
 
 	int is_stub;
@@ -609,6 +636,31 @@ table_pending_default_free(struct table *table)
 	table->pending_default = NULL;
 }
 
+static int
+table_is_update_pending(struct table *table, int consider_pending_default)
+{
+	struct rte_swx_table_entry *e;
+	uint32_t n = 0;
+
+	/* Pending add. */
+	TAILQ_FOREACH(e, &table->pending_add, node)
+		n++;
+
+	/* Pending modify. */
+	TAILQ_FOREACH(e, &table->pending_modify1, node)
+		n++;
+
+	/* Pending delete. */
+	TAILQ_FOREACH(e, &table->pending_delete, node)
+		n++;
+
+	/* Pending default. */
+	if (consider_pending_default && table->pending_default)
+		n++;
+
+	return n;
+}
+
 static void
 table_free(struct rte_swx_ctl_pipeline *ctl)
 {
@@ -680,7 +732,7 @@ table_state_create(struct rte_swx_ctl_pipeline *ctl)
 		struct rte_swx_table_state *ts_next = &ctl->ts_next[i];
 
 		/* Table object. */
-		if (!table->is_stub) {
+		if (!table->is_stub && table->ops.add) {
 			ts_next->obj = table->ops.create(&table->params,
 							 &table->entries,
 							 table->info.args,
@@ -691,6 +743,9 @@ table_state_create(struct rte_swx_ctl_pipeline *ctl)
 			}
 		}
 
+		if (!table->is_stub && !table->ops.add)
+			ts_next->obj = ts->obj;
+
 		/* Default action data: duplicate from current table state. */
 		ts_next->default_action_data =
 			malloc(table->params.action_data_size);
@@ -1114,54 +1169,174 @@ rte_swx_ctl_pipeline_table_default_entry_add(struct rte_swx_ctl_pipeline *ctl,
 	return 0;
 }
 
+
+static void
+table_entry_list_free(struct rte_swx_table_entry_list *list)
+{
+	for ( ; ; ) {
+		struct rte_swx_table_entry *entry;
+
+		entry = TAILQ_FIRST(list);
+		if (!entry)
+			break;
+
+		TAILQ_REMOVE(list, entry, node);
+		table_entry_free(entry);
+	}
+}
+
+static int
+table_entry_list_duplicate(struct rte_swx_ctl_pipeline *ctl,
+			   uint32_t table_id,
+			   struct rte_swx_table_entry_list *dst,
+			   struct rte_swx_table_entry_list *src)
+{
+	struct rte_swx_table_entry *src_entry;
+
+	TAILQ_FOREACH(src_entry, src, node) {
+		struct rte_swx_table_entry *dst_entry;
+
+		dst_entry = table_entry_duplicate(ctl, table_id, src_entry, 1, 1);
+		if (!dst_entry)
+			goto error;
+
+		TAILQ_INSERT_TAIL(dst, dst_entry, node);
+	}
+
+	return 0;
+
+error:
+	table_entry_list_free(dst);
+	return -ENOMEM;
+}
+
+/* This commit stage contains all the operations that can fail; in case ANY of
+ * them fails for ANY table, ALL of them are rolled back for ALL the tables.
+ */
 static int
-table_rollfwd0(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
+table_rollfwd0(struct rte_swx_ctl_pipeline *ctl,
+	       uint32_t table_id,
+	       uint32_t after_swap)
 {
 	struct table *table = &ctl->tables[table_id];
 	struct rte_swx_table_state *ts_next = &ctl->ts_next[table_id];
-	struct rte_swx_table_entry *entry;
 
-	/* Reset counters. */
-	table->n_add = 0;
-	table->n_modify = 0;
-	table->n_delete = 0;
+	if (table->is_stub || !table_is_update_pending(table, 0))
+		return 0;
 
-	/* Add pending rules. */
-	TAILQ_FOREACH(entry, &table->pending_add, node) {
-		int status;
+	/*
+	 * Current table supports incremental update.
+	 */
+	if (table->ops.add) {
+		/* Reset counters. */
+		table->n_add = 0;
+		table->n_modify = 0;
+		table->n_delete = 0;
 
-		status = table->ops.add(ts_next->obj, entry);
-		if (status)
-			return status;
+		/* Add pending rules. */
+		struct rte_swx_table_entry *entry;
 
-		table->n_add++;
-	}
+		TAILQ_FOREACH(entry, &table->pending_add, node) {
+			int status;
 
-	/* Modify pending rules. */
-	TAILQ_FOREACH(entry, &table->pending_modify1, node) {
-		int status;
+			status = table->ops.add(ts_next->obj, entry);
+			if (status)
+				return status;
 
-		status = table->ops.add(ts_next->obj, entry);
-		if (status)
-			return status;
+			table->n_add++;
+		}
 
-		table->n_modify++;
+		/* Modify pending rules. */
+		TAILQ_FOREACH(entry, &table->pending_modify1, node) {
+			int status;
+
+			status = table->ops.add(ts_next->obj, entry);
+			if (status)
+				return status;
+
+			table->n_modify++;
+		}
+
+		/* Delete pending rules. */
+		TAILQ_FOREACH(entry, &table->pending_delete, node) {
+			int status;
+
+			status = table->ops.del(ts_next->obj, entry);
+			if (status)
+				return status;
+
+			table->n_delete++;
+		}
+
+		return 0;
 	}
 
-	/* Delete pending rules. */
-	TAILQ_FOREACH(entry, &table->pending_delete, node) {
+	/*
+	 * Current table does NOT support incremental update.
+	 */
+	if (!after_swap) {
+		struct rte_swx_table_entry_list list;
 		int status;
 
-		status = table->ops.del(ts_next->obj, entry);
+		/* Create updated list of entries included. */
+		TAILQ_INIT(&list);
+
+		status = table_entry_list_duplicate(ctl,
+						    table_id,
+						    &list,
+						    &table->entries);
 		if (status)
-			return status;
+			goto error;
 
-		table->n_delete++;
-	}
+		status = table_entry_list_duplicate(ctl,
+						    table_id,
+						    &list,
+						    &table->pending_add);
+		if (status)
+			goto error;
 
-	return 0;
+		status = table_entry_list_duplicate(ctl,
+						    table_id,
+						    &list,
+						    &table->pending_modify1);
+		if (status)
+			goto error;
+
+		/* Create new table object with the updates included. */
+		ts_next->obj = table->ops.create(&table->params,
+						 &list,
+						 table->info.args,
+						 ctl->numa_node);
+		if (!ts_next->obj) {
+			status = -ENODEV;
+			goto error;
+		}
+
+		table_entry_list_free(&list);
+
+		return 0;
+
+error:
+		table_entry_list_free(&list);
+		return status;
+	} else {
+		struct rte_swx_table_state *ts = &ctl->ts[table_id];
+
+		/* Free the old table object. */
+		if (ts_next->obj && table->ops.free)
+			table->ops.free(ts_next->obj);
+
+		/* Copy over the new table object. */
+		ts_next->obj = ts->obj;
+
+		return 0;
+	}
 }
 
+/* This commit stage contains all the operations that cannot fail. They are
+ * executed only if the previous stage was successful for ALL the tables. Hence,
+ * none of these operations has to be rolled back for ANY table.
+ */
 static void
 table_rollfwd1(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 {
@@ -1186,6 +1361,10 @@ table_rollfwd1(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 	ts_next->default_action_id = action_id;
 }
 
+/* This last commit stage is simply finalizing a successful commit operation.
+ * This stage is only executed if all the previous stages were successful. This
+ * stage cannot fail.
+ */
 static void
 table_rollfwd2(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 {
@@ -1212,43 +1391,66 @@ table_rollfwd2(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 	table_pending_default_free(table);
 }
 
+/* The rollback stage is only executed when the commit failed, i.e. ANY of the
+ * commit operations that can fail did fail for ANY table. It reverts ALL the
+ * tables to their state before the commit started, as if the commit never
+ * happened.
+ */
 static void
 table_rollback(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 {
 	struct table *table = &ctl->tables[table_id];
 	struct rte_swx_table_state *ts_next = &ctl->ts_next[table_id];
-	struct rte_swx_table_entry *entry;
 
-	/* Add back all the entries that were just deleted. */
-	TAILQ_FOREACH(entry, &table->pending_delete, node) {
-		if (!table->n_delete)
-			break;
+	if (table->is_stub || !table_is_update_pending(table, 0))
+		return;
 
-		table->ops.add(ts_next->obj, entry);
-		table->n_delete--;
-	}
+	if (table->ops.add) {
+		struct rte_swx_table_entry *entry;
 
-	/* Add back the old copy for all the entries that were just
-	 * modified.
-	 */
-	TAILQ_FOREACH(entry, &table->pending_modify0, node) {
-		if (!table->n_modify)
-			break;
+		/* Add back all the entries that were just deleted. */
+		TAILQ_FOREACH(entry, &table->pending_delete, node) {
+			if (!table->n_delete)
+				break;
 
-		table->ops.add(ts_next->obj, entry);
-		table->n_modify--;
-	}
+			table->ops.add(ts_next->obj, entry);
+			table->n_delete--;
+		}
 
-	/* Delete all the entries that were just added. */
-	TAILQ_FOREACH(entry, &table->pending_add, node) {
-		if (!table->n_add)
-			break;
+		/* Add back the old copy for all the entries that were just
+		 * modified.
+		 */
+		TAILQ_FOREACH(entry, &table->pending_modify0, node) {
+			if (!table->n_modify)
+				break;
+
+			table->ops.add(ts_next->obj, entry);
+			table->n_modify--;
+		}
 
-		table->ops.del(ts_next->obj, entry);
-		table->n_add--;
+		/* Delete all the entries that were just added. */
+		TAILQ_FOREACH(entry, &table->pending_add, node) {
+			if (!table->n_add)
+				break;
+
+			table->ops.del(ts_next->obj, entry);
+			table->n_add--;
+		}
+	} else {
+		struct rte_swx_table_state *ts = &ctl->ts[table_id];
+
+		/* Free the new table object, as update was cancelled. */
+		if (ts_next->obj && table->ops.free)
+			table->ops.free(ts_next->obj);
+
+		/* Reinstate the old table object. */
+		ts_next->obj = ts->obj;
 	}
 }
 
+/* This stage is conditionally executed (as instructed by the user) after a
+ * failed commit operation to remove ALL the pending work for ALL the tables.
+ */
 static void
 table_abort(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 {
@@ -1290,7 +1492,7 @@ rte_swx_ctl_pipeline_commit(struct rte_swx_ctl_pipeline *ctl, int abort_on_fail)
 	 * ts.
 	 */
 	for (i = 0; i < ctl->info.n_tables; i++) {
-		status = table_rollfwd0(ctl, i);
+		status = table_rollfwd0(ctl, i, 0);
 		if (status)
 			goto rollback;
 	}
@@ -1310,7 +1512,7 @@ rte_swx_ctl_pipeline_commit(struct rte_swx_ctl_pipeline *ctl, int abort_on_fail)
 	/* Operate the changes on the current ts_next, which is the previous ts.
 	 */
 	for (i = 0; i < ctl->info.n_tables; i++) {
-		table_rollfwd0(ctl, i);
+		table_rollfwd0(ctl, i, 1);
 		table_rollfwd1(ctl, i);
 		table_rollfwd2(ctl, i);
 	}
@@ -1444,11 +1646,11 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 				mask = field_hton(mask, mf->n_bits);
 		}
 
-			/* Copy to entry. */
-			if (entry->key_mask)
-				memcpy(&entry->key_mask[offset],
-				       (uint8_t *)&mask,
-				       mf->n_bits / 8);
+		/* Copy to entry. */
+		if (entry->key_mask)
+			memcpy(&entry->key_mask[offset],
+			       (uint8_t *)&mask,
+			       mf->n_bits / 8);
 
 		/*
 		 * Value.
-- 
2.17.1



* [dpdk-dev] [PATCH v2 4/5] table: add table entry priority
  2021-02-16 20:21 [dpdk-dev] [PATCH v2 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
  2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 2/5] pipeline: improve table entry parsing Cristian Dumitrescu
  2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 3/5] pipeline: support non-incremental table updates Cristian Dumitrescu
@ 2021-02-16 20:21 ` Cristian Dumitrescu
  2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 5/5] table: add wildcard match table type Cristian Dumitrescu
  2021-02-16 20:46 ` [dpdk-dev] [PATCH v3 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
  4 siblings, 0 replies; 11+ messages in thread
From: Cristian Dumitrescu @ 2021-02-16 20:21 UTC (permalink / raw)
  To: dev

Add support for table entry priority, which is required for the
wildcard match/ACL table type.
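
With this field in place, a wildcard table rule can carry an explicit
priority between the match fields and the action, e.g. (illustrative
rule file lines with made-up action names; value 0 is the highest
priority, so the first rule wins where the two overlap):

	match 0xC0A80100/0xFFFFFF00 priority 0 action drop
	match 0xC0A80000/0xFFFF0000 priority 7 action fwd port 0x0001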

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
---
 lib/librte_pipeline/rte_swx_ctl.c | 27 +++++++++++++++++++++++++++
 lib/librte_table/rte_swx_table.h  |  9 +++++++++
 2 files changed, 36 insertions(+)

diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c
index 3e8e283c3..38711b15c 100644
--- a/lib/librte_pipeline/rte_swx_ctl.c
+++ b/lib/librte_pipeline/rte_swx_ctl.c
@@ -386,6 +386,9 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl,
 			       entry->key_mask,
 			       table->params.key_size);
 		}
+
+		/* key_priority. */
+		new_entry->key_priority = entry->key_priority;
 	}
 
 	if (data_duplicate) {
@@ -1673,6 +1676,28 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 	tokens += 1 + table->info.n_match_fields;
 	n_tokens -= 1 + table->info.n_match_fields;
 
+	/*
+	 * Match priority.
+	 */
+	if (n_tokens && !strcmp(tokens[0], "priority")) {
+		char *priority = tokens[1];
+		uint32_t val;
+
+		if (n_tokens < 2)
+			goto error;
+
+		/* Parse. */
+		val = strtoul(priority, &priority, 0);
+		if (priority[0])
+			goto error;
+
+		/* Copy to entry. */
+		entry->key_priority = val;
+
+		tokens += 2;
+		n_tokens -= 2;
+	}
+
 	/*
 	 * Action.
 	 */
@@ -1769,6 +1794,8 @@ table_entry_printf(FILE *f,
 			fprintf(f, "%02x", entry->key_mask[i]);
 	}
 
+	fprintf(f, " priority %u", entry->key_priority);
+
 	fprintf(f, " action %s ", action->info.name);
 	for (i = 0; i < action->data_size; i++)
 		fprintf(f, "%02x", entry->action_data[i]);
diff --git a/lib/librte_table/rte_swx_table.h b/lib/librte_table/rte_swx_table.h
index 5a3137ec5..00446718f 100644
--- a/lib/librte_table/rte_swx_table.h
+++ b/lib/librte_table/rte_swx_table.h
@@ -89,6 +89,15 @@ struct rte_swx_table_entry {
 	 */
 	uint64_t key_signature;
 
+	/** Key priority for the current entry. Useful for wildcard match (as
+	 * match rules are commonly overlapping with other rules), ignored for
+	 * exact match (as match rules never overlap, hence all rules have the
+	 * same match priority) and for LPM (match priority is driven by the
+	 * prefix length, with non-overlapping prefixes essentially having the
+	 * same match priority). Value 0 indicates the highest match priority.
+	 */
+	uint32_t key_priority;
+
 	/** Action ID for the current entry. */
 	uint64_t action_id;
 
-- 
2.17.1



* [dpdk-dev] [PATCH v2 5/5] table: add wildcard match table type
  2021-02-16 20:21 [dpdk-dev] [PATCH v2 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
                   ` (2 preceding siblings ...)
  2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 4/5] table: add table entry priority Cristian Dumitrescu
@ 2021-02-16 20:21 ` Cristian Dumitrescu
  2021-02-16 20:46 ` [dpdk-dev] [PATCH v3 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
  4 siblings, 0 replies; 11+ messages in thread
From: Cristian Dumitrescu @ 2021-02-16 20:21 UTC (permalink / raw)
  To: dev; +Cc: Churchill Khangar

Add the wildcard match/ACL table type for the SWX pipeline, which is
used under the hood by the table instruction.
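
The new table type is registered with the pipeline under a name of the
application's choosing and is then available to the pipeline program;
the snippet below is taken from the examples/pipeline change in this
patch:

	#include <rte_swx_table_wm.h>

	status = rte_swx_pipeline_table_type_register(p,
		"wildcard",
		RTE_SWX_TABLE_MATCH_WILDCARD,
		&rte_swx_table_wildcard_match_ops);
	if (status)
		goto error;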

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Signed-off-by: Churchill Khangar <churchill.khangar@intel.com>
---
 doc/api/doxy-api-index.md           |   1 +
 examples/pipeline/obj.c             |   8 +
 lib/librte_table/meson.build        |   8 +-
 lib/librte_table/rte_swx_table_wm.c | 470 ++++++++++++++++++++++++++++
 lib/librte_table/rte_swx_table_wm.h |  27 ++
 lib/librte_table/version.map        |   3 +
 6 files changed, 515 insertions(+), 2 deletions(-)
 create mode 100644 lib/librte_table/rte_swx_table_wm.c
 create mode 100644 lib/librte_table/rte_swx_table_wm.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 748514e24..94e9937be 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -187,6 +187,7 @@ The public API headers are grouped by topics:
   * SWX table:
     [table]            (@ref rte_swx_table.h),
     [table_em]         (@ref rte_swx_table_em.h)
+    [table_wm]         (@ref rte_swx_table_wm.h)
   * [graph]            (@ref rte_graph.h):
     [graph_worker]     (@ref rte_graph_worker.h)
   * graph_nodes:
diff --git a/examples/pipeline/obj.c b/examples/pipeline/obj.c
index 84bbcf2b2..7be61228b 100644
--- a/examples/pipeline/obj.c
+++ b/examples/pipeline/obj.c
@@ -11,6 +11,7 @@
 #include <rte_swx_port_ethdev.h>
 #include <rte_swx_port_source_sink.h>
 #include <rte_swx_table_em.h>
+#include <rte_swx_table_wm.h>
 #include <rte_swx_pipeline.h>
 #include <rte_swx_ctl.h>
 
@@ -415,6 +416,13 @@ pipeline_create(struct obj *obj, const char *name, int numa_node)
 	if (status)
 		goto error;
 
+	status = rte_swx_pipeline_table_type_register(p,
+		"wildcard",
+		RTE_SWX_TABLE_MATCH_WILDCARD,
+		&rte_swx_table_wildcard_match_ops);
+	if (status)
+		goto error;
+
 	/* Node allocation */
 	pipeline = calloc(1, sizeof(struct pipeline));
 	if (pipeline == NULL)
diff --git a/lib/librte_table/meson.build b/lib/librte_table/meson.build
index aa1e1d038..007ffe013 100644
--- a/lib/librte_table/meson.build
+++ b/lib/librte_table/meson.build
@@ -12,7 +12,9 @@ sources = files('rte_table_acl.c',
 		'rte_table_hash_lru.c',
 		'rte_table_array.c',
 		'rte_table_stub.c',
-		'rte_swx_table_em.c',)
+		'rte_swx_table_em.c',
+		'rte_swx_table_wm.c',
+		)
 headers = files('rte_table.h',
 		'rte_table_acl.h',
 		'rte_table_lpm.h',
@@ -24,7 +26,9 @@ headers = files('rte_table.h',
 		'rte_table_array.h',
 		'rte_table_stub.h',
 		'rte_swx_table.h',
-		'rte_swx_table_em.h',)
+		'rte_swx_table_em.h',
+		'rte_swx_table_wm.h',
+		)
 deps += ['mbuf', 'port', 'lpm', 'hash', 'acl']
 
 indirect_headers += files('rte_lru_x86.h',
diff --git a/lib/librte_table/rte_swx_table_wm.c b/lib/librte_table/rte_swx_table_wm.c
new file mode 100644
index 000000000..9924231b3
--- /dev/null
+++ b/lib/librte_table/rte_swx_table_wm.c
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+
+#include <rte_common.h>
+#include <rte_prefetch.h>
+#include <rte_cycles.h>
+#include <rte_acl.h>
+
+#include "rte_swx_table_wm.h"
+
+#ifndef RTE_SWX_TABLE_EM_USE_HUGE_PAGES
+#define RTE_SWX_TABLE_EM_USE_HUGE_PAGES 1
+#endif
+
+#if RTE_SWX_TABLE_EM_USE_HUGE_PAGES
+
+#include <rte_malloc.h>
+
+static void *
+env_malloc(size_t size, size_t alignment, int numa_node)
+{
+	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
+}
+
+static void
+env_free(void *start, size_t size __rte_unused)
+{
+	rte_free(start);
+}
+
+#else
+
+#include <numa.h>
+
+static void *
+env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
+{
+	return numa_alloc_onnode(size, numa_node);
+}
+
+static void
+env_free(void *start, size_t size)
+{
+	numa_free(start, size);
+}
+
+#endif
+
+static char *get_unique_name(void)
+{
+	char *name;
+	uint64_t *tsc;
+
+	name = calloc(7, 1);
+	if (!name)
+		return NULL;
+
+	tsc = (uint64_t *) name;
+	*tsc = rte_get_tsc_cycles();
+	return name;
+}
+
+static uint32_t
+count_entries(struct rte_swx_table_entry_list *entries)
+{
+	struct rte_swx_table_entry *entry;
+	uint32_t n_entries = 0;
+
+	if (!entries)
+		return 0;
+
+	TAILQ_FOREACH(entry, entries, node)
+		n_entries++;
+
+	return n_entries;
+}
+
+static int
+acl_table_cfg_get(struct rte_acl_config *cfg, struct rte_swx_table_params *p)
+{
+	uint32_t byte_id = 0, field_id = 0;
+
+	/* cfg->num_categories. */
+	cfg->num_categories = 1;
+
+	/* cfg->defs and cfg->num_fields. */
+	for (byte_id = 0; byte_id < p->key_size; ) {
+		uint32_t field_size = field_id ? 4 : 1;
+		uint8_t byte = p->key_mask0 ? p->key_mask0[byte_id] : 0xFF;
+
+		if (!byte) {
+			byte_id++;
+			continue;
+		}
+
+		if (field_id == RTE_ACL_MAX_FIELDS)
+			return -1;
+
+		cfg->defs[field_id].type = RTE_ACL_FIELD_TYPE_BITMASK;
+		cfg->defs[field_id].size = field_size;
+		cfg->defs[field_id].field_index = field_id;
+		cfg->defs[field_id].input_index = field_id;
+		cfg->defs[field_id].offset = p->key_offset + byte_id;
+
+		field_id++;
+		byte_id += field_size;
+	}
+
+	if (!field_id)
+		return -1;
+
+	cfg->num_fields = field_id;
+
+	/* cfg->max_size. */
+	cfg->max_size = 0;
+
+	return 0;
+}
+
+static void
+acl_table_rule_field8(uint8_t *value,
+	uint8_t *mask,
+	uint8_t *key_mask0,
+	uint8_t *key_mask,
+	uint8_t *key,
+	uint32_t offset)
+{
+	uint8_t km0, km;
+
+	km0 = key_mask0 ? key_mask0[offset] : 0xFF;
+	km = key_mask ? key_mask[offset] : 0xFF;
+
+	*value = key[offset];
+	*mask = km0 & km;
+}
+
+static void
+acl_table_rule_field32(uint32_t *value,
+	uint32_t *mask,
+	uint8_t *key_mask0,
+	uint8_t *key_mask,
+	uint8_t *key,
+	uint32_t key_size,
+	uint32_t offset)
+{
+	uint32_t km0[4], km[4], k[4];
+	uint32_t byte_id;
+
+	/* Byte 0 = MSB, byte 3 = LSB. */
+	for (byte_id = 0; byte_id < 4; byte_id++) {
+		if (offset + byte_id >= key_size) {
+			km0[byte_id] = 0;
+			km[byte_id] = 0;
+			k[byte_id] = 0;
+			continue;
+		}
+
+		km0[byte_id] = key_mask0 ? key_mask0[offset + byte_id] : 0xFF;
+		km[byte_id] = key_mask ? key_mask[offset + byte_id] : 0xFF;
+		k[byte_id] = key[offset + byte_id];
+	}
+
+	*value = (k[0] << 24) |
+		 (k[1] << 16) |
+		 (k[2] << 8) |
+		 k[3];
+
+	*mask = ((km[0] & km0[0]) << 24) |
+		((km[1] & km0[1]) << 16) |
+		((km[2] & km0[2]) << 8) |
+		(km[3] & km0[3]);
+}
+
+RTE_ACL_RULE_DEF(acl_rule, RTE_ACL_MAX_FIELDS);
+
+static struct rte_acl_rule *
+acl_table_rules_get(struct rte_acl_config *acl_cfg,
+	struct rte_swx_table_params *p,
+	struct rte_swx_table_entry_list *entries,
+	uint32_t n_entries)
+{
+	struct rte_swx_table_entry *entry;
+	uint8_t *memory;
+	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);
+	uint32_t n_fields = acl_cfg->num_fields;
+	uint32_t rule_id;
+
+	if (!n_entries)
+		return NULL;
+
+	memory = malloc(n_entries * acl_rule_size);
+	if (!memory)
+		return NULL;
+
+	rule_id = 0;
+	TAILQ_FOREACH(entry, entries, node) {
+		uint8_t *m = &memory[rule_id * acl_rule_size];
+		struct acl_rule *acl_rule = (struct acl_rule *)m;
+		uint32_t field_id;
+
+		acl_rule->data.category_mask = 1;
+		acl_rule->data.priority = RTE_ACL_MAX_PRIORITY -
+			entry->key_priority;
+		acl_rule->data.userdata = rule_id + 1;
+
+		for (field_id = 0; field_id < n_fields; field_id++) {
+			struct rte_acl_field *f = &acl_rule->field[field_id];
+			uint32_t size = acl_cfg->defs[field_id].size;
+			uint32_t offset = acl_cfg->defs[field_id].offset -
+				p->key_offset;
+
+			if (size == 1) {
+				uint8_t value, mask;
+
+				acl_table_rule_field8(&value,
+						      &mask,
+						      p->key_mask0,
+						      entry->key_mask,
+						      entry->key,
+						      offset);
+
+				f->value.u8 = value;
+				f->mask_range.u8 = mask;
+			} else {
+				uint32_t value, mask;
+
+				acl_table_rule_field32(&value,
+						       &mask,
+						       p->key_mask0,
+						       entry->key_mask,
+						       entry->key,
+						       p->key_size,
+						       offset);
+
+				f->value.u32 = value;
+				f->mask_range.u32 = mask;
+			}
+		}
+
+		rule_id++;
+	}
+
+	return (struct rte_acl_rule *)memory;
+}
+
+/* When the table to be created has no rules, the expected behavior is to always
+ * get lookup miss for any input key. To achieve this, we add a single bogus
+ * rule to the table with the rule user data set to 0, i.e. the value returned
+ * when lookup miss takes place. Whether lookup hit (the bogus rule is hit) or
+ * miss, a user data of 0 is returned, which for the ACL library is equivalent
+ * to lookup miss.
+ */
+static struct rte_acl_rule *
+acl_table_rules_default_get(struct rte_acl_config *acl_cfg)
+{
+	struct rte_acl_rule *acl_rule;
+	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);
+
+	acl_rule = calloc(1, acl_rule_size);
+	if (!acl_rule)
+		return NULL;
+
+	acl_rule->data.category_mask = 1;
+	acl_rule->data.priority = RTE_ACL_MAX_PRIORITY;
+	acl_rule->data.userdata = 0;
+
+	memset(&acl_rule[1], 0xFF, acl_rule_size - sizeof(struct rte_acl_rule));
+
+	return acl_rule;
+}
+
+static struct rte_acl_ctx *
+acl_table_create(struct rte_swx_table_params *params,
+	struct rte_swx_table_entry_list *entries,
+	uint32_t n_entries,
+	int numa_node)
+{
+	struct rte_acl_param acl_params = {0};
+	struct rte_acl_config acl_cfg = {0};
+	struct rte_acl_ctx *acl_ctx = NULL;
+	struct rte_acl_rule *acl_rules = NULL;
+	char *name = NULL;
+	int status = 0;
+
+	/* ACL config data structures. */
+	name = get_unique_name();
+	if (!name) {
+		status = -1;
+		goto free_resources;
+	}
+
+	status = acl_table_cfg_get(&acl_cfg, params);
+	if (status)
+		goto free_resources;
+
+	acl_rules = n_entries ?
+		acl_table_rules_get(&acl_cfg, params, entries, n_entries) :
+		acl_table_rules_default_get(&acl_cfg);
+	if (!acl_rules) {
+		status = -1;
+		goto free_resources;
+	}
+
+	n_entries = n_entries ? n_entries : 1;
+
+	/* ACL create. */
+	acl_params.name = name;
+	acl_params.socket_id = numa_node;
+	acl_params.rule_size = RTE_ACL_RULE_SZ(acl_cfg.num_fields);
+	acl_params.max_rule_num = n_entries;
+
+	acl_ctx = rte_acl_create(&acl_params);
+	if (!acl_ctx) {
+		status = -1;
+		goto free_resources;
+	}
+
+	/* ACL add rules. */
+	status = rte_acl_add_rules(acl_ctx, acl_rules, n_entries);
+	if (status)
+		goto free_resources;
+
+	/* ACL build. */
+	status = rte_acl_build(acl_ctx, &acl_cfg);
+
+free_resources:
+	if (status && acl_ctx)
+		rte_acl_free(acl_ctx);
+
+	free(acl_rules);
+
+	free(name);
+
+	return status ? NULL : acl_ctx;
+}
+
+static void
+entry_data_copy(uint8_t *data,
+	struct rte_swx_table_entry_list *entries,
+	uint32_t n_entries,
+	uint32_t entry_data_size)
+{
+	struct rte_swx_table_entry *entry;
+	uint32_t i = 0;
+
+	if (!n_entries)
+		return;
+
+	TAILQ_FOREACH(entry, entries, node) {
+		uint64_t *d = (uint64_t *)&data[i * entry_data_size];
+
+		d[0] = entry->action_id;
+		memcpy(&d[1], entry->action_data, entry_data_size - 8);
+
+		i++;
+	}
+}
+
+struct table {
+	struct rte_acl_ctx *acl_ctx;
+	uint8_t *data;
+	size_t total_size;
+	uint32_t entry_data_size;
+};
+
+static void
+table_free(void *table)
+{
+	struct table *t = table;
+
+	if (!t)
+		return;
+
+	if (t->acl_ctx)
+		rte_acl_free(t->acl_ctx);
+	env_free(t, t->total_size);
+}
+
+static void *
+table_create(struct rte_swx_table_params *params,
+	     struct rte_swx_table_entry_list *entries,
+	     const char *args __rte_unused,
+	     int numa_node)
+{
+	struct table *t = NULL;
+	size_t meta_sz, data_sz, total_size;
+	uint32_t entry_data_size;
+	uint32_t n_entries = count_entries(entries);
+
+	/* Check input arguments. */
+	if (!params || !params->key_size)
+		goto error;
+
+	/* Memory allocation and initialization. */
+	entry_data_size = 8 + params->action_data_size;
+	meta_sz = sizeof(struct table);
+	data_sz = n_entries * entry_data_size;
+	total_size = meta_sz + data_sz;
+
+	t = env_malloc(total_size, RTE_CACHE_LINE_SIZE, numa_node);
+	if (!t)
+		goto error;
+
+	memset(t, 0, total_size);
+	t->entry_data_size = entry_data_size;
+	t->total_size = total_size;
+	t->data = (uint8_t *)&t[1];
+
+	t->acl_ctx = acl_table_create(params, entries, n_entries, numa_node);
+	if (!t->acl_ctx)
+		goto error;
+
+	entry_data_copy(t->data, entries, n_entries, entry_data_size);
+
+	return t;
+
+error:
+	table_free(t);
+	return NULL;
+}
+
+struct mailbox {
+
+};
+
+static uint64_t
+table_mailbox_size_get(void)
+{
+	return sizeof(struct mailbox);
+}
+
+static int
+table_lookup(void *table,
+	     void *mailbox __rte_unused,
+	     const uint8_t **key,
+	     uint64_t *action_id,
+	     uint8_t **action_data,
+	     int *hit)
+{
+	struct table *t = table;
+	uint8_t *data;
+	uint32_t user_data;
+
+	rte_acl_classify(t->acl_ctx, key, &user_data, 1, 1);
+	if (!user_data) {
+		*hit = 0;
+		return 1;
+	}
+
+	data = &t->data[(user_data - 1) * t->entry_data_size];
+	*action_id = ((uint64_t *)data)[0];
+	*action_data = &data[8];
+	*hit = 1;
+	return 1;
+}
+
+struct rte_swx_table_ops rte_swx_table_wildcard_match_ops = {
+	.footprint_get = NULL,
+	.mailbox_size_get = table_mailbox_size_get,
+	.create = table_create,
+	.add = NULL,
+	.del = NULL,
+	.lkp = (rte_swx_table_lookup_t)table_lookup,
+	.free = table_free,
+};
diff --git a/lib/librte_table/rte_swx_table_wm.h b/lib/librte_table/rte_swx_table_wm.h
new file mode 100644
index 000000000..a716536ca
--- /dev/null
+++ b/lib/librte_table/rte_swx_table_wm.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+#ifndef __INCLUDE_RTE_SWX_TABLE_WM_H__
+#define __INCLUDE_RTE_SWX_TABLE_WM_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE SWX Wildcard Match Table
+ */
+
+#include <stdint.h>
+
+#include <rte_swx_table.h>
+
+/** Wildcard match table operations. */
+extern struct rte_swx_table_ops rte_swx_table_wildcard_match_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/librte_table/version.map b/lib/librte_table/version.map
index bea2252a4..eb0291ac4 100644
--- a/lib/librte_table/version.map
+++ b/lib/librte_table/version.map
@@ -25,4 +25,7 @@ EXPERIMENTAL {
 	# added in 20.11
 	rte_swx_table_exact_match_ops;
 	rte_swx_table_exact_match_unoptimized_ops;
+
+	# added in 21.05
+	rte_swx_table_wildcard_match_ops;
 };
-- 
2.17.1



* [dpdk-dev] [PATCH v3 1/5] pipeline: improve table entry helpers
  2021-02-16 20:21 [dpdk-dev] [PATCH v2 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
                   ` (3 preceding siblings ...)
  2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 5/5] table: add wildcard match table type Cristian Dumitrescu
@ 2021-02-16 20:46 ` Cristian Dumitrescu
  2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 2/5] pipeline: improve table entry parsing Cristian Dumitrescu
                     ` (3 more replies)
  4 siblings, 4 replies; 11+ messages in thread
From: Cristian Dumitrescu @ 2021-02-16 20:46 UTC (permalink / raw)
  To: dev

Improve the internal table entry helper routines for key comparison,
entry duplication and checks.

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
---
 lib/librte_pipeline/rte_swx_ctl.c | 120 ++++++++++++++++--------------
 1 file changed, 65 insertions(+), 55 deletions(-)

diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c
index ee6df4544..af653d7f4 100644
--- a/lib/librte_pipeline/rte_swx_ctl.c
+++ b/lib/librte_pipeline/rte_swx_ctl.c
@@ -234,6 +234,26 @@ table_entry_alloc(struct table *table)
 	return NULL;
 }
 
+static int
+table_entry_key_check_em(struct table *table, struct rte_swx_table_entry *entry)
+{
+	uint8_t *key_mask0 = table->params.key_mask0;
+	uint32_t key_size = table->params.key_size, i;
+
+	if (!entry->key_mask)
+		return 0;
+
+	for (i = 0; i < key_size; i++) {
+		uint8_t km0 = key_mask0[i];
+		uint8_t km = entry->key_mask[i];
+
+		if ((km & km0) != km0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int
 table_entry_check(struct rte_swx_ctl_pipeline *ctl,
 		  uint32_t table_id,
@@ -242,6 +262,7 @@ table_entry_check(struct rte_swx_ctl_pipeline *ctl,
 		  int data_check)
 {
 	struct table *table = &ctl->tables[table_id];
+	int status;
 
 	CHECK(entry, EINVAL);
 
@@ -266,7 +287,9 @@ table_entry_check(struct rte_swx_ctl_pipeline *ctl,
 				break;
 
 			case RTE_SWX_TABLE_MATCH_EXACT:
-				CHECK(!entry->key_mask, EINVAL);
+				status = table_entry_key_check_em(table, entry);
+				if (status)
+					return status;
 				break;
 
 			default:
@@ -327,10 +350,7 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl,
 		new_entry->key_signature = entry->key_signature;
 
 		/* key_mask. */
-		if (table->params.match_type != RTE_SWX_TABLE_MATCH_EXACT) {
-			if (!entry->key_mask)
-				goto error;
-
+		if (entry->key_mask) {
 			new_entry->key_mask = malloc(table->params.key_size);
 			if (!new_entry->key_mask)
 				goto error;
@@ -357,18 +377,24 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl,
 
 		/* action_data. */
 		a = &ctl->actions[entry->action_id];
-		if (a->data_size) {
-			if (!entry->action_data)
-				goto error;
+		if (a->data_size && !entry->action_data)
+			goto error;
 
-			new_entry->action_data = malloc(a->data_size);
-			if (!new_entry->action_data)
-				goto error;
+		/* The table layer provisions a constant action data size per
+		 * entry, which should be the largest data size for all the
+		 * actions enabled for the current table, and attempts to copy
+		 * this many bytes each time a table entry is added, even if the
+		 * specific action requires less data or even no data at all,
+		 * hence we always have to allocate the max.
+		 */
+		new_entry->action_data = calloc(1, table->params.action_data_size);
+		if (!new_entry->action_data)
+			goto error;
 
+		if (a->data_size)
 			memcpy(new_entry->action_data,
 			       entry->action_data,
 			       a->data_size);
-		}
 	}
 
 	return new_entry;
@@ -378,58 +404,36 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl,
 	return NULL;
 }
 
-static int
-entry_keycmp_em(struct rte_swx_table_entry *e0,
-		struct rte_swx_table_entry *e1,
-		uint32_t key_size)
-{
-	if (e0->key_signature != e1->key_signature)
-		return 1; /* Not equal. */
-
-	if (memcmp(e0->key, e1->key, key_size))
-		return 1; /* Not equal. */
-
-	return 0; /* Equal */
-}
-
-static int
-entry_keycmp_wm(struct rte_swx_table_entry *e0 __rte_unused,
-		struct rte_swx_table_entry *e1 __rte_unused,
-		uint32_t key_size __rte_unused)
-{
-	/* TBD */
-
-	return 1; /* Not equal */
-}
-
-static int
-entry_keycmp_lpm(struct rte_swx_table_entry *e0 __rte_unused,
-		 struct rte_swx_table_entry *e1 __rte_unused,
-		 uint32_t key_size __rte_unused)
-{
-	/* TBD */
-
-	return 1; /* Not equal */
-}
-
 static int
 table_entry_keycmp(struct table *table,
 		   struct rte_swx_table_entry *e0,
 		   struct rte_swx_table_entry *e1)
 {
-	switch (table->params.match_type) {
-	case RTE_SWX_TABLE_MATCH_EXACT:
-		return entry_keycmp_em(e0, e1, table->params.key_size);
+	uint32_t key_size = table->params.key_size;
+	uint32_t i;
+
+	for (i = 0; i < key_size; i++) {
+		uint8_t *key_mask0 = table->params.key_mask0;
+		uint8_t km0, km[2], k[2];
+
+		km0 = key_mask0 ? key_mask0[i] : 0xFF;
+
+		km[0] = e0->key_mask ? e0->key_mask[i] : 0xFF;
+		km[1] = e1->key_mask ? e1->key_mask[i] : 0xFF;
 
-	case RTE_SWX_TABLE_MATCH_WILDCARD:
-		return entry_keycmp_wm(e0, e1, table->params.key_size);
+		k[0] = e0->key[i];
+		k[1] = e1->key[i];
 
-	case RTE_SWX_TABLE_MATCH_LPM:
-		return entry_keycmp_lpm(e0, e1, table->params.key_size);
+		/* Mask comparison. */
+		if ((km[0] & km0) != (km[1] & km0))
+			return 1; /* Not equal. */
 
-	default:
-		return 1; /* Not equal. */
+		/* Value comparison. */
+		if ((k[0] & km[0] & km0) != (k[1] & km[1] & km0))
+			return 1; /* Not equal. */
 	}
+
+	return 0; /* Equal. */
 }
 
 static struct rte_swx_table_entry *
@@ -893,6 +897,9 @@ rte_swx_ctl_pipeline_table_entry_add(struct rte_swx_ctl_pipeline *ctl,
 	CHECK(table, EINVAL);
 	table_id = table - ctl->tables;
 
+	CHECK(entry, EINVAL);
+	CHECK(!table_entry_check(ctl, table_id, entry, 1, 1), EINVAL);
+
 	new_entry = table_entry_duplicate(ctl, table_id, entry, 1, 1);
 	CHECK(new_entry, ENOMEM);
 
@@ -1095,6 +1102,9 @@ rte_swx_ctl_pipeline_table_default_entry_add(struct rte_swx_ctl_pipeline *ctl,
 	table_id = table - ctl->tables;
 	CHECK(!table->info.default_action_is_const, EINVAL);
 
+	CHECK(entry, EINVAL);
+	CHECK(!table_entry_check(ctl, table_id, entry, 0, 1), EINVAL);
+
 	new_entry = table_entry_duplicate(ctl, table_id, entry, 0, 1);
 	CHECK(new_entry, ENOMEM);
 
-- 
2.17.1



* [dpdk-dev] [PATCH v3 2/5] pipeline: improve table entry parsing
  2021-02-16 20:46 ` [dpdk-dev] [PATCH v3 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
@ 2021-02-16 20:46   ` Cristian Dumitrescu
  2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 3/5] pipeline: support non-incremental table updates Cristian Dumitrescu
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 11+ messages in thread
From: Cristian Dumitrescu @ 2021-02-16 20:46 UTC (permalink / raw)
  To: dev; +Cc: Venkata Suresh Kumar P, Churchill Khangar

Improve the table entry parsing: better code structure, parsing of the
key field masks, and support for comments and empty lines in the table
entry files.

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Signed-off-by: Venkata Suresh Kumar P <venkata.suresh.kumar.p@intel.com>
Signed-off-by: Churchill Khangar <churchill.khangar@intel.com>
---
 examples/pipeline/cli.c           |  21 +++-
 lib/librte_pipeline/rte_swx_ctl.c | 172 ++++++++++++++++++++----------
 lib/librte_pipeline/rte_swx_ctl.h |   7 +-
 3 files changed, 141 insertions(+), 59 deletions(-)

diff --git a/examples/pipeline/cli.c b/examples/pipeline/cli.c
index e97e12060..30c2dd34d 100644
--- a/examples/pipeline/cli.c
+++ b/examples/pipeline/cli.c
@@ -881,14 +881,19 @@ cmd_pipeline_table_update(char **tokens,
 	if (file_add)
 		for (line_id = 1; ; line_id++) {
 			struct rte_swx_table_entry *entry;
+			int is_blank_or_comment;
 
 			if (fgets(line, 2048, file_add) == NULL)
 				break;
 
 			entry = rte_swx_ctl_pipeline_table_entry_read(p->ctl,
 				table_name,
-				line);
+				line,
+				&is_blank_or_comment);
 			if (!entry) {
+				if (is_blank_or_comment)
+					continue;
+
 				snprintf(out, out_size, MSG_FILE_ERR,
 					file_name_add, line_id);
 				goto error;
@@ -911,14 +916,19 @@ cmd_pipeline_table_update(char **tokens,
 	if (file_delete)
 		for (line_id = 1; ; line_id++) {
 			struct rte_swx_table_entry *entry;
+			int is_blank_or_comment;
 
 			if (fgets(line, 2048, file_delete) == NULL)
 				break;
 
 			entry = rte_swx_ctl_pipeline_table_entry_read(p->ctl,
 				table_name,
-				line);
+				line,
+				&is_blank_or_comment);
 			if (!entry) {
+				if (is_blank_or_comment)
+					continue;
+
 				snprintf(out, out_size, MSG_FILE_ERR,
 					file_name_delete, line_id);
 				goto error;
@@ -940,14 +950,19 @@ cmd_pipeline_table_update(char **tokens,
 	if (file_default)
 		for (line_id = 1; ; line_id++) {
 			struct rte_swx_table_entry *entry;
+			int is_blank_or_comment;
 
 			if (fgets(line, 2048, file_default) == NULL)
 				break;
 
 			entry = rte_swx_ctl_pipeline_table_entry_read(p->ctl,
 				table_name,
-				line);
+				line,
+				&is_blank_or_comment);
 			if (!entry) {
+				if (is_blank_or_comment)
+					continue;
+
 				snprintf(out, out_size, MSG_FILE_ERR,
 					file_name_default, line_id);
 				goto error;
diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c
index af653d7f4..4a416bc71 100644
--- a/lib/librte_pipeline/rte_swx_ctl.c
+++ b/lib/librte_pipeline/rte_swx_ctl.c
@@ -1339,19 +1339,32 @@ rte_swx_ctl_pipeline_abort(struct rte_swx_ctl_pipeline *ctl)
 		table_abort(ctl, i);
 }
 
+static int
+token_is_comment(const char *token)
+{
+	if ((token[0] == '#') ||
+	    (token[0] == ';') ||
+	    ((token[0] == '/') && (token[1] == '/')))
+		return 1; /* TRUE. */
+
+	return 0; /* FALSE. */
+}
+
 #define RTE_SWX_CTL_ENTRY_TOKENS_MAX 256
 
 struct rte_swx_table_entry *
 rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 				      const char *table_name,
-				      const char *string)
+				      const char *string,
+				      int *is_blank_or_comment)
 {
-	char *tokens[RTE_SWX_CTL_ENTRY_TOKENS_MAX];
+	char *token_array[RTE_SWX_CTL_ENTRY_TOKENS_MAX], **tokens;
 	struct table *table;
 	struct action *action;
 	struct rte_swx_table_entry *entry = NULL;
 	char *s0 = NULL, *s;
 	uint32_t n_tokens = 0, arg_offset = 0, i;
+	int blank_or_comment = 0;
 
 	/* Check input arguments. */
 	if (!ctl)
@@ -1381,37 +1394,66 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 		char *token;
 
 		token = strtok_r(s, " \f\n\r\t\v", &s);
-		if (!token)
+		if (!token || token_is_comment(token))
 			break;
 
 		if (n_tokens >= RTE_SWX_CTL_ENTRY_TOKENS_MAX)
 			goto error;
 
-		tokens[n_tokens] = token;
+		token_array[n_tokens] = token;
 		n_tokens++;
 	}
 
-	if ((n_tokens < 3 + table->info.n_match_fields) ||
-	    strcmp(tokens[0], "match") ||
-	    strcmp(tokens[1 + table->info.n_match_fields], "action"))
-		goto error;
-
-	action = action_find(ctl, tokens[2 + table->info.n_match_fields]);
-	if (!action)
+	if (!n_tokens) {
+		blank_or_comment = 1;
 		goto error;
+	}
 
-	if (n_tokens != 3 + table->info.n_match_fields +
-	    action->info.n_args * 2)
-		goto error;
+	tokens = token_array;
 
 	/*
 	 * Match.
 	 */
+	if (n_tokens && strcmp(tokens[0], "match"))
+		goto action;
+
+	if (n_tokens < 1 + table->info.n_match_fields)
+		goto error;
+
 	for (i = 0; i < table->info.n_match_fields; i++) {
 		struct rte_swx_ctl_table_match_field_info *mf = &table->mf[i];
-		char *mf_val = tokens[1 + i];
-		uint64_t val;
+		char *mf_val = tokens[1 + i], *mf_mask = NULL;
+		uint64_t val, mask = UINT64_MAX;
+		uint32_t offset = (mf->offset - table->mf[0].offset) / 8;
+
+		/*
+		 * Mask.
+		 */
+		mf_mask = strchr(mf_val, '/');
+		if (mf_mask) {
+			*mf_mask = 0;
+			mf_mask++;
+
+			/* Parse. */
+			mask = strtoull(mf_mask, &mf_mask, 0);
+			if (mf_mask[0])
+				goto error;
+
+			/* Endianness conversion. */
+			if (mf->is_header)
+				mask = field_hton(mask, mf->n_bits);
+		}
 
+			/* Copy to entry. */
+			if (entry->key_mask)
+				memcpy(&entry->key_mask[offset],
+				       (uint8_t *)&mask,
+				       mf->n_bits / 8);
+
+		/*
+		 * Value.
+		 */
+		/* Parse. */
 		val = strtoull(mf_val, &mf_val, 0);
 		if (mf_val[0])
 			goto error;
@@ -1420,17 +1462,32 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 		if (mf->is_header)
 			val = field_hton(val, mf->n_bits);
 
-		/* Copy key and key_mask to entry. */
-		memcpy(&entry->key[(mf->offset - table->mf[0].offset) / 8],
+		/* Copy to entry. */
+		memcpy(&entry->key[offset],
 		       (uint8_t *)&val,
 		       mf->n_bits / 8);
-
-		/* TBD Set entry->key_mask for wildcard and LPM tables. */
 	}
 
+	tokens += 1 + table->info.n_match_fields;
+	n_tokens -= 1 + table->info.n_match_fields;
+
 	/*
 	 * Action.
 	 */
+action:
+	if (n_tokens && strcmp(tokens[0], "action"))
+		goto other;
+
+	if (n_tokens < 2)
+		goto error;
+
+	action = action_find(ctl, tokens[1]);
+	if (!action)
+		goto error;
+
+	if (n_tokens < 2 + action->info.n_args * 2)
+		goto error;
+
 	/* action_id. */
 	entry->action_id = action - ctl->actions;
 
@@ -1441,8 +1498,8 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 		uint64_t val;
 		int is_nbo = 0;
 
-		arg_name = tokens[3 + table->info.n_match_fields + i * 2];
-		arg_val = tokens[3 + table->info.n_match_fields + i * 2 + 1];
+		arg_name = tokens[2 + i * 2];
+		arg_val = tokens[2 + i * 2 + 1];
 
 		if (strcmp(arg_name, arg->name) ||
 		    (strlen(arg_val) < 4) ||
@@ -1473,15 +1530,50 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 		arg_offset += arg->n_bits / 8;
 	}
 
+	tokens += 2 + action->info.n_args * 2;
+	n_tokens -= 2 + action->info.n_args * 2;
+
+other:
+	if (n_tokens)
+		goto error;
+
 	free(s0);
 	return entry;
 
 error:
 	table_entry_free(entry);
 	free(s0);
+	if (is_blank_or_comment)
+		*is_blank_or_comment = blank_or_comment;
 	return NULL;
 }
 
+static void
+table_entry_printf(FILE *f,
+		   struct rte_swx_ctl_pipeline *ctl,
+		   struct table *table,
+		   struct rte_swx_table_entry *entry)
+{
+	struct action *action = &ctl->actions[entry->action_id];
+	uint32_t i;
+
+	fprintf(f, "match ");
+	for (i = 0; i < table->params.key_size; i++)
+		fprintf(f, "%02x", entry->key[i]);
+
+	if (entry->key_mask) {
+		fprintf(f, "/");
+		for (i = 0; i < table->params.key_size; i++)
+			fprintf(f, "%02x", entry->key_mask[i]);
+	}
+
+	fprintf(f, " action %s ", action->info.name);
+	for (i = 0; i < action->data_size; i++)
+		fprintf(f, "%02x", entry->action_data[i]);
+
+	fprintf(f, "\n");
+}
+
 int
 rte_swx_ctl_pipeline_table_fprintf(FILE *f,
 				   struct rte_swx_ctl_pipeline *ctl,
@@ -1512,47 +1604,17 @@ rte_swx_ctl_pipeline_table_fprintf(FILE *f,
 
 	/* Table entries. */
 	TAILQ_FOREACH(entry, &table->entries, node) {
-		struct action *action = &ctl->actions[entry->action_id];
-
-		fprintf(f, "match ");
-		for (i = 0; i < table->params.key_size; i++)
-			fprintf(f, "%02x", entry->key[i]);
-
-		fprintf(f, " action %s ", action->info.name);
-		for (i = 0; i < action->data_size; i++)
-			fprintf(f, "%02x", entry->action_data[i]);
-
-		fprintf(f, "\n");
+		table_entry_printf(f, ctl, table, entry);
 		n_entries++;
 	}
 
 	TAILQ_FOREACH(entry, &table->pending_modify0, node) {
-		struct action *action = &ctl->actions[entry->action_id];
-
-		fprintf(f, "match ");
-		for (i = 0; i < table->params.key_size; i++)
-			fprintf(f, "%02x", entry->key[i]);
-
-		fprintf(f, " action %s ", action->info.name);
-		for (i = 0; i < action->data_size; i++)
-			fprintf(f, "%02x", entry->action_data[i]);
-
-		fprintf(f, "\n");
+		table_entry_printf(f, ctl, table, entry);
 		n_entries++;
 	}
 
 	TAILQ_FOREACH(entry, &table->pending_delete, node) {
-		struct action *action = &ctl->actions[entry->action_id];
-
-		fprintf(f, "match ");
-		for (i = 0; i < table->params.key_size; i++)
-			fprintf(f, "%02x", entry->key[i]);
-
-		fprintf(f, " action %s ", action->info.name);
-		for (i = 0; i < action->data_size; i++)
-			fprintf(f, "%02x", entry->action_data[i]);
-
-		fprintf(f, "\n");
+		table_entry_printf(f, ctl, table, entry);
 		n_entries++;
 	}
 
diff --git a/lib/librte_pipeline/rte_swx_ctl.h b/lib/librte_pipeline/rte_swx_ctl.h
index 32815b69e..530671db1 100644
--- a/lib/librte_pipeline/rte_swx_ctl.h
+++ b/lib/librte_pipeline/rte_swx_ctl.h
@@ -521,6 +521,10 @@ rte_swx_ctl_pipeline_abort(struct rte_swx_ctl_pipeline *ctl);
  *   Table name.
  * @param[in] string
  *   String containing the table entry.
+ * @param[out] is_blank_or_comment
+ *   On error, this argument provides an indication of whether *string* contains
+ *   an invalid table entry (set to zero) or a blank or comment line that should
+ *   typically be ignored (set to a non-zero value).
  * @return
  *   0 on success or the following error codes otherwise:
  *   -EINVAL: Invalid argument.
@@ -529,7 +533,8 @@ __rte_experimental
 struct rte_swx_table_entry *
 rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 				      const char *table_name,
-				      const char *string);
+				      const char *string,
+				      int *is_blank_or_comment);
 
 /**
  * Pipeline table print to file
-- 
2.17.1


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [dpdk-dev] [PATCH v3 3/5] pipeline: support non-incremental table updates
  2021-02-16 20:46 ` [dpdk-dev] [PATCH v3 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
  2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 2/5] pipeline: improve table entry parsing Cristian Dumitrescu
@ 2021-02-16 20:46   ` Cristian Dumitrescu
  2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 4/5] table: add table entry priority Cristian Dumitrescu
  2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 5/5] table: add wildcard match table type Cristian Dumitrescu
  3 siblings, 0 replies; 11+ messages in thread
From: Cristian Dumitrescu @ 2021-02-16 20:46 UTC (permalink / raw)
  To: dev

Some table types (e.g. exact match/hash) allow for incremental table
updates, while others (e.g. wildcard match/ACL) do not. The former is
already supported; the latter is enabled by this patch.
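
As a sketch of what this distinction looks like at the table ops level
(the my_table_* callbacks below are placeholders, not part of this
patch): a table type that cannot be updated incrementally simply leaves
the add/del callbacks unset, in which case the control plane rebuilds
the table object from the complete entry list at commit time instead of
applying the pending operations one by one:

    /* Hypothetical non-incremental table type: no add/del callbacks. */
    static struct rte_swx_table_ops my_table_ops = {
        .footprint_get = NULL,
        .mailbox_size_get = my_table_mailbox_size_get,
        .create = my_table_create, /* called again on every commit */
        .add = NULL,               /* no incremental add */
        .del = NULL,               /* no incremental delete */
        .lkp = my_table_lookup,
        .free = my_table_free,
    };

The exact match table keeps its add/del callbacks and continues to be
updated incrementally.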

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
---
 lib/librte_pipeline/rte_swx_ctl.c | 315 ++++++++++++++++++++++++------
 1 file changed, 258 insertions(+), 57 deletions(-)

diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c
index 4a416bc71..6bef9c311 100644
--- a/lib/librte_pipeline/rte_swx_ctl.c
+++ b/lib/librte_pipeline/rte_swx_ctl.c
@@ -42,11 +42,38 @@ struct table {
 	struct rte_swx_table_ops ops;
 	struct rte_swx_table_params params;
 
+	/* Set of "stable" keys: these keys are currently part of the table;
+	 * these keys will be preserved with no action data changes after the
+	 * next commit.
+	 */
 	struct rte_swx_table_entry_list entries;
+
+	/* Set of new keys: these keys are currently NOT part of the table;
+	 * these keys will be added to the table on the next commit, if
+	 * the commit operation is successful.
+	 */
 	struct rte_swx_table_entry_list pending_add;
+
+	/* Set of keys to be modified: these keys are currently part of the
+	 * table; these keys are still going to be part of the table after the
+	 * next commit, but their action data will be modified if the commit
+	 * operation is successful. The modify0 list contains the keys with the
+	 * current action data, the modify1 list contains the keys with the
+	 * modified action data.
+	 */
 	struct rte_swx_table_entry_list pending_modify0;
 	struct rte_swx_table_entry_list pending_modify1;
+
+	/* Set of keys to be deleted: these keys are currently part of the
+	 * table; these keys are to be deleted from the table on the next
+	 * commit, if the commit operation is successful.
+	 */
 	struct rte_swx_table_entry_list pending_delete;
+
+	/* The pending default action: this is NOT the current default action;
+	 * this will be the new default action after the next commit, if the
+	 * next commit operation is successful.
+	 */
 	struct rte_swx_table_entry *pending_default;
 
 	int is_stub;
@@ -609,6 +636,31 @@ table_pending_default_free(struct table *table)
 	table->pending_default = NULL;
 }
 
+static int
+table_is_update_pending(struct table *table, int consider_pending_default)
+{
+	struct rte_swx_table_entry *e;
+	uint32_t n = 0;
+
+	/* Pending add. */
+	TAILQ_FOREACH(e, &table->pending_add, node)
+		n++;
+
+	/* Pending modify. */
+	TAILQ_FOREACH(e, &table->pending_modify1, node)
+		n++;
+
+	/* Pending delete. */
+	TAILQ_FOREACH(e, &table->pending_delete, node)
+		n++;
+
+	/* Pending default. */
+	if (consider_pending_default && table->pending_default)
+		n++;
+
+	return n;
+}
+
 static void
 table_free(struct rte_swx_ctl_pipeline *ctl)
 {
@@ -680,7 +732,7 @@ table_state_create(struct rte_swx_ctl_pipeline *ctl)
 		struct rte_swx_table_state *ts_next = &ctl->ts_next[i];
 
 		/* Table object. */
-		if (!table->is_stub) {
+		if (!table->is_stub && table->ops.add) {
 			ts_next->obj = table->ops.create(&table->params,
 							 &table->entries,
 							 table->info.args,
@@ -691,6 +743,9 @@ table_state_create(struct rte_swx_ctl_pipeline *ctl)
 			}
 		}
 
+		if (!table->is_stub && !table->ops.add)
+			ts_next->obj = ts->obj;
+
 		/* Default action data: duplicate from current table state. */
 		ts_next->default_action_data =
 			malloc(table->params.action_data_size);
@@ -1114,54 +1169,173 @@ rte_swx_ctl_pipeline_table_default_entry_add(struct rte_swx_ctl_pipeline *ctl,
 	return 0;
 }
 
+
+static void
+table_entry_list_free(struct rte_swx_table_entry_list *list)
+{
+	for ( ; ; ) {
+		struct rte_swx_table_entry *entry;
+
+		entry = TAILQ_FIRST(list);
+		if (!entry)
+			break;
+
+		TAILQ_REMOVE(list, entry, node);
+		table_entry_free(entry);
+	}
+}
+
+static int
+table_entry_list_duplicate(struct rte_swx_ctl_pipeline *ctl,
+			   uint32_t table_id,
+			   struct rte_swx_table_entry_list *dst,
+			   struct rte_swx_table_entry_list *src)
+{
+	struct rte_swx_table_entry *src_entry;
+
+	TAILQ_FOREACH(src_entry, src, node) {
+		struct rte_swx_table_entry *dst_entry;
+
+		dst_entry = table_entry_duplicate(ctl, table_id, src_entry, 1, 1);
+		if (!dst_entry)
+			goto error;
+
+		TAILQ_INSERT_TAIL(dst, dst_entry, node);
+	}
+
+	return 0;
+
+error:
+	table_entry_list_free(dst);
+	return -ENOMEM;
+}
+
+/* This commit stage contains all the operations that can fail; in case ANY of
+ * them fails for ANY table, ALL of them are rolled back for ALL the tables.
+ */
 static int
-table_rollfwd0(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
+table_rollfwd0(struct rte_swx_ctl_pipeline *ctl,
+	       uint32_t table_id,
+	       uint32_t after_swap)
 {
 	struct table *table = &ctl->tables[table_id];
+	struct rte_swx_table_state *ts = &ctl->ts[table_id];
 	struct rte_swx_table_state *ts_next = &ctl->ts_next[table_id];
-	struct rte_swx_table_entry *entry;
 
-	/* Reset counters. */
-	table->n_add = 0;
-	table->n_modify = 0;
-	table->n_delete = 0;
+	if (table->is_stub || !table_is_update_pending(table, 0))
+		return 0;
 
-	/* Add pending rules. */
-	TAILQ_FOREACH(entry, &table->pending_add, node) {
-		int status;
+	/*
+	 * Current table supports incremental update.
+	 */
+	if (table->ops.add) {
+		/* Reset counters. */
+		table->n_add = 0;
+		table->n_modify = 0;
+		table->n_delete = 0;
 
-		status = table->ops.add(ts_next->obj, entry);
-		if (status)
-			return status;
+		/* Add pending rules. */
+		struct rte_swx_table_entry *entry;
 
-		table->n_add++;
-	}
+		TAILQ_FOREACH(entry, &table->pending_add, node) {
+			int status;
 
-	/* Modify pending rules. */
-	TAILQ_FOREACH(entry, &table->pending_modify1, node) {
-		int status;
+			status = table->ops.add(ts_next->obj, entry);
+			if (status)
+				return status;
 
-		status = table->ops.add(ts_next->obj, entry);
-		if (status)
-			return status;
+			table->n_add++;
+		}
+
+		/* Modify pending rules. */
+		TAILQ_FOREACH(entry, &table->pending_modify1, node) {
+			int status;
+
+			status = table->ops.add(ts_next->obj, entry);
+			if (status)
+				return status;
+
+			table->n_modify++;
+		}
+
+		/* Delete pending rules. */
+		TAILQ_FOREACH(entry, &table->pending_delete, node) {
+			int status;
 
-		table->n_modify++;
+			status = table->ops.del(ts_next->obj, entry);
+			if (status)
+				return status;
+
+			table->n_delete++;
+		}
+
+		return 0;
 	}
 
-	/* Delete pending rules. */
-	TAILQ_FOREACH(entry, &table->pending_delete, node) {
+	/*
+	 * Current table does NOT support incremental update.
+	 */
+	if (!after_swap) {
+		struct rte_swx_table_entry_list list;
 		int status;
 
-		status = table->ops.del(ts_next->obj, entry);
+		/* Create updated list of entries included. */
+		TAILQ_INIT(&list);
+
+		status = table_entry_list_duplicate(ctl,
+						    table_id,
+						    &list,
+						    &table->entries);
+		if (status)
+			goto error;
+
+		status = table_entry_list_duplicate(ctl,
+						    table_id,
+						    &list,
+						    &table->pending_add);
+		if (status)
+			goto error;
+
+		status = table_entry_list_duplicate(ctl,
+						    table_id,
+						    &list,
+						    &table->pending_modify1);
 		if (status)
-			return status;
+			goto error;
+
+		/* Create new table object with the updates included. */
+		ts_next->obj = table->ops.create(&table->params,
+						 &list,
+						 table->info.args,
+						 ctl->numa_node);
+		if (!ts_next->obj) {
+			status = -ENODEV;
+			goto error;
+		}
+
+		table_entry_list_free(&list);
+
+		return 0;
 
-		table->n_delete++;
+error:
+		table_entry_list_free(&list);
+		return status;
 	}
 
+	/* Free the old table object. */
+	if (ts_next->obj && table->ops.free)
+		table->ops.free(ts_next->obj);
+
+	/* Copy over the new table object. */
+	ts_next->obj = ts->obj;
+
 	return 0;
 }
 
+/* This commit stage contains all the operations that cannot fail. They are
+ * executed only if the previous stage was successful for ALL the tables. Hence,
+ * none of these operations has to be rolled back for ANY table.
+ */
 static void
 table_rollfwd1(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 {
@@ -1186,6 +1360,10 @@ table_rollfwd1(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 	ts_next->default_action_id = action_id;
 }
 
+/* This last commit stage is simply finalizing a successful commit operation.
+ * This stage is only executed if all the previous stages were successful. This
+ * stage cannot fail.
+ */
 static void
 table_rollfwd2(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 {
@@ -1212,43 +1390,66 @@ table_rollfwd2(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 	table_pending_default_free(table);
 }
 
+/* The rollback stage is only executed when the commit failed, i.e. ANY of the
+ * commit operations that can fail did fail for ANY table. It reverts ALL the
+ * tables to their state before the commit started, as if the commit never
+ * happened.
+ */
 static void
 table_rollback(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 {
 	struct table *table = &ctl->tables[table_id];
 	struct rte_swx_table_state *ts_next = &ctl->ts_next[table_id];
-	struct rte_swx_table_entry *entry;
 
-	/* Add back all the entries that were just deleted. */
-	TAILQ_FOREACH(entry, &table->pending_delete, node) {
-		if (!table->n_delete)
-			break;
+	if (table->is_stub || !table_is_update_pending(table, 0))
+		return;
 
-		table->ops.add(ts_next->obj, entry);
-		table->n_delete--;
-	}
+	if (table->ops.add) {
+		struct rte_swx_table_entry *entry;
 
-	/* Add back the old copy for all the entries that were just
-	 * modified.
-	 */
-	TAILQ_FOREACH(entry, &table->pending_modify0, node) {
-		if (!table->n_modify)
-			break;
+		/* Add back all the entries that were just deleted. */
+		TAILQ_FOREACH(entry, &table->pending_delete, node) {
+			if (!table->n_delete)
+				break;
 
-		table->ops.add(ts_next->obj, entry);
-		table->n_modify--;
-	}
+			table->ops.add(ts_next->obj, entry);
+			table->n_delete--;
+		}
 
-	/* Delete all the entries that were just added. */
-	TAILQ_FOREACH(entry, &table->pending_add, node) {
-		if (!table->n_add)
-			break;
+		/* Add back the old copy for all the entries that were just
+		 * modified.
+		 */
+		TAILQ_FOREACH(entry, &table->pending_modify0, node) {
+			if (!table->n_modify)
+				break;
+
+			table->ops.add(ts_next->obj, entry);
+			table->n_modify--;
+		}
 
-		table->ops.del(ts_next->obj, entry);
-		table->n_add--;
+		/* Delete all the entries that were just added. */
+		TAILQ_FOREACH(entry, &table->pending_add, node) {
+			if (!table->n_add)
+				break;
+
+			table->ops.del(ts_next->obj, entry);
+			table->n_add--;
+		}
+	} else {
+		struct rte_swx_table_state *ts = &ctl->ts[table_id];
+
+		/* Free the new table object, as update was cancelled. */
+		if (ts_next->obj && table->ops.free)
+			table->ops.free(ts_next->obj);
+
+		/* Reinstate the old table object. */
+		ts_next->obj = ts->obj;
 	}
 }
 
+/* This stage is conditionally executed (as instructed by the user) after a
+ * failed commit operation to remove ALL the pending work for ALL the tables.
+ */
 static void
 table_abort(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 {
@@ -1290,7 +1491,7 @@ rte_swx_ctl_pipeline_commit(struct rte_swx_ctl_pipeline *ctl, int abort_on_fail)
 	 * ts.
 	 */
 	for (i = 0; i < ctl->info.n_tables; i++) {
-		status = table_rollfwd0(ctl, i);
+		status = table_rollfwd0(ctl, i, 0);
 		if (status)
 			goto rollback;
 	}
@@ -1310,7 +1511,7 @@ rte_swx_ctl_pipeline_commit(struct rte_swx_ctl_pipeline *ctl, int abort_on_fail)
 	/* Operate the changes on the current ts_next, which is the previous ts.
 	 */
 	for (i = 0; i < ctl->info.n_tables; i++) {
-		table_rollfwd0(ctl, i);
+		table_rollfwd0(ctl, i, 1);
 		table_rollfwd1(ctl, i);
 		table_rollfwd2(ctl, i);
 	}
@@ -1444,11 +1645,11 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 				mask = field_hton(mask, mf->n_bits);
 		}
 
-			/* Copy to entry. */
-			if (entry->key_mask)
-				memcpy(&entry->key_mask[offset],
-				       (uint8_t *)&mask,
-				       mf->n_bits / 8);
+		/* Copy to entry. */
+		if (entry->key_mask)
+			memcpy(&entry->key_mask[offset],
+			       (uint8_t *)&mask,
+			       mf->n_bits / 8);
 
 		/*
 		 * Value.
-- 
2.17.1


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [dpdk-dev] [PATCH v3 4/5] table: add table entry priority
  2021-02-16 20:46 ` [dpdk-dev] [PATCH v3 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
  2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 2/5] pipeline: improve table entry parsing Cristian Dumitrescu
  2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 3/5] pipeline: support non-incremental table updates Cristian Dumitrescu
@ 2021-02-16 20:46   ` Cristian Dumitrescu
  2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 5/5] table: add wildcard match table type Cristian Dumitrescu
  3 siblings, 0 replies; 11+ messages in thread
From: Cristian Dumitrescu @ 2021-02-16 20:46 UTC (permalink / raw)
  To: dev

Add support for table entry priority, which is required for the
wildcard match/ACL table type.
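
For illustration, with the parsing change below an entry can specify its
priority between the match fields and the action, with 0 being the
highest priority. The field value, mask and action name in this example
are made up:

    match 0xc0a80000/0xffff0000 priority 10 action drop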

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
---
 lib/librte_pipeline/rte_swx_ctl.c | 27 +++++++++++++++++++++++++++
 lib/librte_table/rte_swx_table.h  |  9 +++++++++
 2 files changed, 36 insertions(+)

diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c
index 6bef9c311..2e4538bd0 100644
--- a/lib/librte_pipeline/rte_swx_ctl.c
+++ b/lib/librte_pipeline/rte_swx_ctl.c
@@ -386,6 +386,9 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl,
 			       entry->key_mask,
 			       table->params.key_size);
 		}
+
+		/* key_priority. */
+		new_entry->key_priority = entry->key_priority;
 	}
 
 	if (data_duplicate) {
@@ -1672,6 +1675,28 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 	tokens += 1 + table->info.n_match_fields;
 	n_tokens -= 1 + table->info.n_match_fields;
 
+	/*
+	 * Match priority.
+	 */
+	if (n_tokens && !strcmp(tokens[0], "priority")) {
+		char *priority = tokens[1];
+		uint32_t val;
+
+		if (n_tokens < 2)
+			goto error;
+
+		/* Parse. */
+		val = strtoul(priority, &priority, 0);
+		if (priority[0])
+			goto error;
+
+		/* Copy to entry. */
+		entry->key_priority = val;
+
+		tokens += 2;
+		n_tokens -= 2;
+	}
+
 	/*
 	 * Action.
 	 */
@@ -1768,6 +1793,8 @@ table_entry_printf(FILE *f,
 			fprintf(f, "%02x", entry->key_mask[i]);
 	}
 
+	fprintf(f, " priority %u", entry->key_priority);
+
 	fprintf(f, " action %s ", action->info.name);
 	for (i = 0; i < action->data_size; i++)
 		fprintf(f, "%02x", entry->action_data[i]);
diff --git a/lib/librte_table/rte_swx_table.h b/lib/librte_table/rte_swx_table.h
index 5a3137ec5..00446718f 100644
--- a/lib/librte_table/rte_swx_table.h
+++ b/lib/librte_table/rte_swx_table.h
@@ -89,6 +89,15 @@ struct rte_swx_table_entry {
 	 */
 	uint64_t key_signature;
 
+	/** Key priority for the current entry. Useful for wildcard match (as
+	 * match rules are commonly overlapping with other rules), ignored for
+	 * exact match (as match rules never overlap, hence all rules have the
+	 * same match priority) and for LPM (match priority is driven by the
+	 * prefix length, with non-overlapping prefixes essentially having the
+	 * same match priority). Value 0 indicates the highest match priority.
+	 */
+	uint32_t key_priority;
+
 	/** Action ID for the current entry. */
 	uint64_t action_id;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [dpdk-dev] [PATCH v3 5/5] table: add wildcard match table type
  2021-02-16 20:46 ` [dpdk-dev] [PATCH v3 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
                     ` (2 preceding siblings ...)
  2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 4/5] table: add table entry priority Cristian Dumitrescu
@ 2021-02-16 20:46   ` Cristian Dumitrescu
  2021-03-23 18:47     ` Thomas Monjalon
  3 siblings, 1 reply; 11+ messages in thread
From: Cristian Dumitrescu @ 2021-02-16 20:46 UTC (permalink / raw)
  To: dev; +Cc: Churchill Khangar

Add the wildcard match/ACL table type for the SWX pipeline, which is
used under the hood by the table instruction.
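
For reference, the example pipeline application below registers the new
type under the "wildcard" name; any application can do the same with a
type name of its choice (p being its pipeline handle):

    status = rte_swx_pipeline_table_type_register(p,
        "wildcard",
        RTE_SWX_TABLE_MATCH_WILDCARD,
        &rte_swx_table_wildcard_match_ops);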

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Signed-off-by: Churchill Khangar <churchill.khangar@intel.com>
---
 doc/api/doxy-api-index.md           |   1 +
 examples/pipeline/obj.c             |   8 +
 lib/librte_table/meson.build        |   8 +-
 lib/librte_table/rte_swx_table_wm.c | 470 ++++++++++++++++++++++++++++
 lib/librte_table/rte_swx_table_wm.h |  27 ++
 lib/librte_table/version.map        |   3 +
 6 files changed, 515 insertions(+), 2 deletions(-)
 create mode 100644 lib/librte_table/rte_swx_table_wm.c
 create mode 100644 lib/librte_table/rte_swx_table_wm.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 748514e24..94e9937be 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -187,6 +187,7 @@ The public API headers are grouped by topics:
   * SWX table:
     [table]            (@ref rte_swx_table.h),
     [table_em]         (@ref rte_swx_table_em.h)
+    [table_wm]         (@ref rte_swx_table_wm.h)
   * [graph]            (@ref rte_graph.h):
     [graph_worker]     (@ref rte_graph_worker.h)
   * graph_nodes:
diff --git a/examples/pipeline/obj.c b/examples/pipeline/obj.c
index 84bbcf2b2..7be61228b 100644
--- a/examples/pipeline/obj.c
+++ b/examples/pipeline/obj.c
@@ -11,6 +11,7 @@
 #include <rte_swx_port_ethdev.h>
 #include <rte_swx_port_source_sink.h>
 #include <rte_swx_table_em.h>
+#include <rte_swx_table_wm.h>
 #include <rte_swx_pipeline.h>
 #include <rte_swx_ctl.h>
 
@@ -415,6 +416,13 @@ pipeline_create(struct obj *obj, const char *name, int numa_node)
 	if (status)
 		goto error;
 
+	status = rte_swx_pipeline_table_type_register(p,
+		"wildcard",
+		RTE_SWX_TABLE_MATCH_WILDCARD,
+		&rte_swx_table_wildcard_match_ops);
+	if (status)
+		goto error;
+
 	/* Node allocation */
 	pipeline = calloc(1, sizeof(struct pipeline));
 	if (pipeline == NULL)
diff --git a/lib/librte_table/meson.build b/lib/librte_table/meson.build
index aa1e1d038..007ffe013 100644
--- a/lib/librte_table/meson.build
+++ b/lib/librte_table/meson.build
@@ -12,7 +12,9 @@ sources = files('rte_table_acl.c',
 		'rte_table_hash_lru.c',
 		'rte_table_array.c',
 		'rte_table_stub.c',
-		'rte_swx_table_em.c',)
+		'rte_swx_table_em.c',
+		'rte_swx_table_wm.c',
+		)
 headers = files('rte_table.h',
 		'rte_table_acl.h',
 		'rte_table_lpm.h',
@@ -24,7 +26,9 @@ headers = files('rte_table.h',
 		'rte_table_array.h',
 		'rte_table_stub.h',
 		'rte_swx_table.h',
-		'rte_swx_table_em.h',)
+		'rte_swx_table_em.h',
+		'rte_swx_table_wm.h',
+		)
 deps += ['mbuf', 'port', 'lpm', 'hash', 'acl']
 
 indirect_headers += files('rte_lru_x86.h',
diff --git a/lib/librte_table/rte_swx_table_wm.c b/lib/librte_table/rte_swx_table_wm.c
new file mode 100644
index 000000000..9924231b3
--- /dev/null
+++ b/lib/librte_table/rte_swx_table_wm.c
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+
+#include <rte_common.h>
+#include <rte_prefetch.h>
+#include <rte_cycles.h>
+#include <rte_acl.h>
+
+#include "rte_swx_table_wm.h"
+
+#ifndef RTE_SWX_TABLE_EM_USE_HUGE_PAGES
+#define RTE_SWX_TABLE_EM_USE_HUGE_PAGES 1
+#endif
+
+#if RTE_SWX_TABLE_EM_USE_HUGE_PAGES
+
+#include <rte_malloc.h>
+
+static void *
+env_malloc(size_t size, size_t alignment, int numa_node)
+{
+	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
+}
+
+static void
+env_free(void *start, size_t size __rte_unused)
+{
+	rte_free(start);
+}
+
+#else
+
+#include <numa.h>
+
+static void *
+env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
+{
+	return numa_alloc_onnode(size, numa_node);
+}
+
+static void
+env_free(void *start, size_t size)
+{
+	numa_free(start, size);
+}
+
+#endif
+
+static char *get_unique_name(void)
+{
+	char *name;
+	uint64_t *tsc;
+
+	name = calloc(7, 1);
+	if (!name)
+		return NULL;
+
+	tsc = (uint64_t *) name;
+	*tsc = rte_get_tsc_cycles();
+	return name;
+}
+
+static uint32_t
+count_entries(struct rte_swx_table_entry_list *entries)
+{
+	struct rte_swx_table_entry *entry;
+	uint32_t n_entries = 0;
+
+	if (!entries)
+		return 0;
+
+	TAILQ_FOREACH(entry, entries, node)
+		n_entries++;
+
+	return n_entries;
+}
+
+static int
+acl_table_cfg_get(struct rte_acl_config *cfg, struct rte_swx_table_params *p)
+{
+	uint32_t byte_id = 0, field_id = 0;
+
+	/* cfg->num_categories. */
+	cfg->num_categories = 1;
+
+	/* cfg->defs and cfg->num_fields. */
+	for (byte_id = 0; byte_id < p->key_size; ) {
+		uint32_t field_size = field_id ? 4 : 1;
+		uint8_t byte = p->key_mask0 ? p->key_mask0[byte_id] : 0xFF;
+
+		if (!byte) {
+			byte_id++;
+			continue;
+		}
+
+		if (field_id == RTE_ACL_MAX_FIELDS)
+			return -1;
+
+		cfg->defs[field_id].type = RTE_ACL_FIELD_TYPE_BITMASK;
+		cfg->defs[field_id].size = field_size;
+		cfg->defs[field_id].field_index = field_id;
+		cfg->defs[field_id].input_index = field_id;
+		cfg->defs[field_id].offset = p->key_offset + byte_id;
+
+		field_id++;
+		byte_id += field_size;
+	}
+
+	if (!field_id)
+		return -1;
+
+	cfg->num_fields = field_id;
+
+	/* cfg->max_size. */
+	cfg->max_size = 0;
+
+	return 0;
+}
+
+static void
+acl_table_rule_field8(uint8_t *value,
+	uint8_t *mask,
+	uint8_t *key_mask0,
+	uint8_t *key_mask,
+	uint8_t *key,
+	uint32_t offset)
+{
+	uint8_t km0, km;
+
+	km0 = key_mask0 ? key_mask0[offset] : 0xFF;
+	km = key_mask ? key_mask[offset] : 0xFF;
+
+	*value = key[offset];
+	*mask = km0 & km;
+}
+
+static void
+acl_table_rule_field32(uint32_t *value,
+	uint32_t *mask,
+	uint8_t *key_mask0,
+	uint8_t *key_mask,
+	uint8_t *key,
+	uint32_t key_size,
+	uint32_t offset)
+{
+	uint32_t km0[4], km[4], k[4];
+	uint32_t byte_id;
+
+	/* Byte 0 = MSB, byte 3 = LSB. */
+	for (byte_id = 0; byte_id < 4; byte_id++) {
+		if (offset + byte_id >= key_size) {
+			km0[byte_id] = 0;
+			km[byte_id] = 0;
+			k[byte_id] = 0;
+			continue;
+		}
+
+		km0[byte_id] = key_mask0 ? key_mask0[offset + byte_id] : 0xFF;
+		km[byte_id] = key_mask ? key_mask[offset + byte_id] : 0xFF;
+		k[byte_id] = key[offset + byte_id];
+	}
+
+	*value = (k[0] << 24) |
+		 (k[1] << 16) |
+		 (k[2] << 8) |
+		 k[3];
+
+	*mask = ((km[0] & km0[0]) << 24) |
+		((km[1] & km0[1]) << 16) |
+		((km[2] & km0[2]) << 8) |
+		(km[3] & km0[3]);
+}
+
+RTE_ACL_RULE_DEF(acl_rule, RTE_ACL_MAX_FIELDS);
+
+static struct rte_acl_rule *
+acl_table_rules_get(struct rte_acl_config *acl_cfg,
+	struct rte_swx_table_params *p,
+	struct rte_swx_table_entry_list *entries,
+	uint32_t n_entries)
+{
+	struct rte_swx_table_entry *entry;
+	uint8_t *memory;
+	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);
+	uint32_t n_fields = acl_cfg->num_fields;
+	uint32_t rule_id;
+
+	if (!n_entries)
+		return NULL;
+
+	memory = malloc(n_entries * acl_rule_size);
+	if (!memory)
+		return NULL;
+
+	rule_id = 0;
+	TAILQ_FOREACH(entry, entries, node) {
+		uint8_t *m = &memory[rule_id * acl_rule_size];
+		struct acl_rule *acl_rule = (struct acl_rule *)m;
+		uint32_t field_id;
+
+		acl_rule->data.category_mask = 1;
+		acl_rule->data.priority = RTE_ACL_MAX_PRIORITY -
+			entry->key_priority;
+		acl_rule->data.userdata = rule_id + 1;
+
+		for (field_id = 0; field_id < n_fields; field_id++) {
+			struct rte_acl_field *f = &acl_rule->field[field_id];
+			uint32_t size = acl_cfg->defs[field_id].size;
+			uint32_t offset = acl_cfg->defs[field_id].offset -
+				p->key_offset;
+
+			if (size == 1) {
+				uint8_t value, mask;
+
+				acl_table_rule_field8(&value,
+						      &mask,
+						      p->key_mask0,
+						      entry->key_mask,
+						      entry->key,
+						      offset);
+
+				f->value.u8 = value;
+				f->mask_range.u8 = mask;
+			} else {
+				uint32_t value, mask;
+
+				acl_table_rule_field32(&value,
+						       &mask,
+						       p->key_mask0,
+						       entry->key_mask,
+						       entry->key,
+						       p->key_size,
+						       offset);
+
+				f->value.u32 = value;
+				f->mask_range.u32 = mask;
+			}
+		}
+
+		rule_id++;
+	}
+
+	return (struct rte_acl_rule *)memory;
+}
+
+/* When the table to be created has no rules, the expected behavior is to always
+ * get lookup miss for any input key. To achieve this, we add a single bogus
+ * rule to the table with the rule user data set to 0, i.e. the value returned
+ * when lookup miss takes place. Whether lookup hit (the bogus rule is hit) or
+ * miss, a user data of 0 is returned, which for the ACL library is equivalent
+ * to lookup miss.
+ */
+static struct rte_acl_rule *
+acl_table_rules_default_get(struct rte_acl_config *acl_cfg)
+{
+	struct rte_acl_rule *acl_rule;
+	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);
+
+	acl_rule = calloc(1, acl_rule_size);
+	if (!acl_rule)
+		return NULL;
+
+	acl_rule->data.category_mask = 1;
+	acl_rule->data.priority = RTE_ACL_MAX_PRIORITY;
+	acl_rule->data.userdata = 0;
+
+	memset(&acl_rule[1], 0xFF, acl_rule_size - sizeof(struct rte_acl_rule));
+
+	return acl_rule;
+}
+
+static struct rte_acl_ctx *
+acl_table_create(struct rte_swx_table_params *params,
+	struct rte_swx_table_entry_list *entries,
+	uint32_t n_entries,
+	int numa_node)
+{
+	struct rte_acl_param acl_params = {0};
+	struct rte_acl_config acl_cfg = {0};
+	struct rte_acl_ctx *acl_ctx = NULL;
+	struct rte_acl_rule *acl_rules = NULL;
+	char *name = NULL;
+	int status = 0;
+
+	/* ACL config data structures. */
+	name = get_unique_name();
+	if (!name) {
+		status = -1;
+		goto free_resources;
+	}
+
+	status = acl_table_cfg_get(&acl_cfg, params);
+	if (status)
+		goto free_resources;
+
+	acl_rules = n_entries ?
+		acl_table_rules_get(&acl_cfg, params, entries, n_entries) :
+		acl_table_rules_default_get(&acl_cfg);
+	if (!acl_rules) {
+		status = -1;
+		goto free_resources;
+	}
+
+	n_entries = n_entries ? n_entries : 1;
+
+	/* ACL create. */
+	acl_params.name = name;
+	acl_params.socket_id = numa_node;
+	acl_params.rule_size = RTE_ACL_RULE_SZ(acl_cfg.num_fields);
+	acl_params.max_rule_num = n_entries;
+
+	acl_ctx = rte_acl_create(&acl_params);
+	if (!acl_ctx) {
+		status = -1;
+		goto free_resources;
+	}
+
+	/* ACL add rules. */
+	status = rte_acl_add_rules(acl_ctx, acl_rules, n_entries);
+	if (status)
+		goto free_resources;
+
+	/* ACL build. */
+	status = rte_acl_build(acl_ctx, &acl_cfg);
+
+free_resources:
+	if (status && acl_ctx)
+		rte_acl_free(acl_ctx);
+
+	free(acl_rules);
+
+	free(name);
+
+	return status ? NULL : acl_ctx;
+}
+
+static void
+entry_data_copy(uint8_t *data,
+	struct rte_swx_table_entry_list *entries,
+	uint32_t n_entries,
+	uint32_t entry_data_size)
+{
+	struct rte_swx_table_entry *entry;
+	uint32_t i = 0;
+
+	if (!n_entries)
+		return;
+
+	TAILQ_FOREACH(entry, entries, node) {
+		uint64_t *d = (uint64_t *)&data[i * entry_data_size];
+
+		d[0] = entry->action_id;
+		memcpy(&d[1], entry->action_data, entry_data_size - 8);
+
+		i++;
+	}
+}
+
+struct table {
+	struct rte_acl_ctx *acl_ctx;
+	uint8_t *data;
+	size_t total_size;
+	uint32_t entry_data_size;
+};
+
+static void
+table_free(void *table)
+{
+	struct table *t = table;
+
+	if (!t)
+		return;
+
+	if (t->acl_ctx)
+		rte_acl_free(t->acl_ctx);
+	env_free(t, t->total_size);
+}
+
+static void *
+table_create(struct rte_swx_table_params *params,
+	     struct rte_swx_table_entry_list *entries,
+	     const char *args __rte_unused,
+	     int numa_node)
+{
+	struct table *t = NULL;
+	size_t meta_sz, data_sz, total_size;
+	uint32_t entry_data_size;
+	uint32_t n_entries = count_entries(entries);
+
+	/* Check input arguments. */
+	if (!params || !params->key_size)
+		goto error;
+
+	/* Memory allocation and initialization. */
+	entry_data_size = 8 + params->action_data_size;
+	meta_sz = sizeof(struct table);
+	data_sz = n_entries * entry_data_size;
+	total_size = meta_sz + data_sz;
+
+	t = env_malloc(total_size, RTE_CACHE_LINE_SIZE, numa_node);
+	if (!t)
+		goto error;
+
+	memset(t, 0, total_size);
+	t->entry_data_size = entry_data_size;
+	t->total_size = total_size;
+	t->data = (uint8_t *)&t[1];
+
+	t->acl_ctx = acl_table_create(params, entries, n_entries, numa_node);
+	if (!t->acl_ctx)
+		goto error;
+
+	entry_data_copy(t->data, entries, n_entries, entry_data_size);
+
+	return t;
+
+error:
+	table_free(t);
+	return NULL;
+}
+
+struct mailbox {
+
+};
+
+static uint64_t
+table_mailbox_size_get(void)
+{
+	return sizeof(struct mailbox);
+}
+
+static int
+table_lookup(void *table,
+	     void *mailbox __rte_unused,
+	     const uint8_t **key,
+	     uint64_t *action_id,
+	     uint8_t **action_data,
+	     int *hit)
+{
+	struct table *t = table;
+	uint8_t *data;
+	uint32_t user_data;
+
+	rte_acl_classify(t->acl_ctx, key, &user_data, 1, 1);
+	if (!user_data) {
+		*hit = 0;
+		return 1;
+	}
+
+	data = &t->data[(user_data - 1) * t->entry_data_size];
+	*action_id = ((uint64_t *)data)[0];
+	*action_data = &data[8];
+	*hit = 1;
+	return 1;
+}
+
+struct rte_swx_table_ops rte_swx_table_wildcard_match_ops = {
+	.footprint_get = NULL,
+	.mailbox_size_get = table_mailbox_size_get,
+	.create = table_create,
+	.add = NULL,
+	.del = NULL,
+	.lkp = (rte_swx_table_lookup_t)table_lookup,
+	.free = table_free,
+};
diff --git a/lib/librte_table/rte_swx_table_wm.h b/lib/librte_table/rte_swx_table_wm.h
new file mode 100644
index 000000000..a716536ca
--- /dev/null
+++ b/lib/librte_table/rte_swx_table_wm.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+#ifndef __INCLUDE_RTE_SWX_TABLE_WM_H__
+#define __INCLUDE_RTE_SWX_TABLE_WM_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE SWX Wildcard Match Table
+ */
+
+#include <stdint.h>
+
+#include <rte_swx_table.h>
+
+/** Wildcard match table operations. */
+extern struct rte_swx_table_ops rte_swx_table_wildcard_match_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/librte_table/version.map b/lib/librte_table/version.map
index bea2252a4..eb0291ac4 100644
--- a/lib/librte_table/version.map
+++ b/lib/librte_table/version.map
@@ -25,4 +25,7 @@ EXPERIMENTAL {
 	# added in 20.11
 	rte_swx_table_exact_match_ops;
 	rte_swx_table_exact_match_unoptimized_ops;
+
+	# added in 21.05
+	rte_swx_table_wildcard_match_ops;
 };
-- 
2.17.1


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [dpdk-dev] [PATCH v3 5/5] table: add wildcard match table type
  2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 5/5] table: add wildcard match table type Cristian Dumitrescu
@ 2021-03-23 18:47     ` Thomas Monjalon
  0 siblings, 0 replies; 11+ messages in thread
From: Thomas Monjalon @ 2021-03-23 18:47 UTC (permalink / raw)
  To: Cristian Dumitrescu; +Cc: dev, Churchill Khangar

16/02/2021 21:46, Cristian Dumitrescu:
> Add the wildcard match/ACL table type for the SWX pipeline, which is
> used under the hood by the table instruction.
> 
> Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
> Signed-off-by: Churchill Khangar <churchill.khangar@intel.com>

Series applied, thanks.




^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread

Thread overview: 11+ messages
2021-02-16 20:21 [dpdk-dev] [PATCH v2 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 2/5] pipeline: improve table entry parsing Cristian Dumitrescu
2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 3/5] pipeline: support non-incremental table updates Cristian Dumitrescu
2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 4/5] table: add table entry priority Cristian Dumitrescu
2021-02-16 20:21 ` [dpdk-dev] [PATCH v2 5/5] table: add wildcard match table type Cristian Dumitrescu
2021-02-16 20:46 ` [dpdk-dev] [PATCH v3 1/5] pipeline: improve table entry helpers Cristian Dumitrescu
2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 2/5] pipeline: improve table entry parsing Cristian Dumitrescu
2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 3/5] pipeline: support non-incremental table updates Cristian Dumitrescu
2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 4/5] table: add table entry priority Cristian Dumitrescu
2021-02-16 20:46   ` [dpdk-dev] [PATCH v3 5/5] table: add wildcard match table type Cristian Dumitrescu
2021-03-23 18:47     ` Thomas Monjalon
