DPDK patches and discussions
* [PATCH v5 2/5] graph: add feature arc option in graph create
  @ 2024-10-14 14:33  4%   ` Nitin Saxena
  0 siblings, 0 replies; 169+ results
From: Nitin Saxena @ 2024-10-14 14:33 UTC (permalink / raw)
  To: Jerin Jacob, Kiran Kumar K, Nithin Dabilpuram, Zhirun Yan,
	Robin Jarry, Christophe Fontaine
  Cc: dev, Nitin Saxena, Pavan Nikhilesh

Added an option in graph create to call feature-arc specific node process
functions. This removes the extra overhead of checking feature arc status
in nodes when the application is not using feature arc processing.
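
A minimal usage sketch (not part of this patch; node, graph and function
names are illustrative) of how an application opts in:

#include <rte_graph.h>

static uint16_t
my_node_process(struct rte_graph *graph, struct rte_node *node,
		void **objs, uint16_t nb_objs)
{
	(void)graph; (void)node; (void)objs;
	/* plain fast path, no feature arc handling */
	return nb_objs;
}

static uint16_t
my_node_feat_arc_process(struct rte_graph *graph, struct rte_node *node,
			 void **objs, uint16_t nb_objs)
{
	(void)graph; (void)node; (void)objs;
	/* fast path with feature arc handling */
	return nb_objs;
}

/* Register both callbacks; feat_arc_proc is only picked up when the
 * graph is created with feature_arc_enable set to true. */
static struct rte_node_register my_node = {
	.name = "my_node",
	.process = my_node_process,
	.feat_arc_proc = my_node_feat_arc_process,
};
RTE_NODE_REGISTER(my_node);

static rte_graph_t
create_graph(struct rte_graph_param *prm)
{
	prm->feature_arc_enable = true; /* rte_graph_walk() uses feat_arc_proc */
	return rte_graph_create("my_graph", prm);
}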

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Nitin Saxena <nsaxena@marvell.com>
---
 doc/guides/rel_notes/release_24_11.rst | 7 +++++++
 lib/graph/graph.c                      | 1 +
 lib/graph/graph_populate.c             | 7 ++++++-
 lib/graph/graph_private.h              | 3 +++
 lib/graph/node.c                       | 2 ++
 lib/graph/rte_graph.h                  | 3 +++
 6 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 1299de886a..451627a331 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -251,6 +251,13 @@ ABI Changes
 
 * eventdev: Added ``preschedule_type`` field to ``rte_event_dev_config`` structure.
 
+* graph: Added a feature arc specific ``feat_arc_proc`` node callback function
+  in ``struct rte_node_register``. If this function is not NULL and
+  ``feature_arc_enable`` is set to ``true`` in ``struct rte_graph_param``,
+  ``rte_graph_walk()`` calls the ``feat_arc_proc`` callback instead of ``process``.
+
+* graph: Added a ``feature_arc_enable`` parameter in ``struct rte_graph_param``
+  to make ``rte_graph_walk()`` call the non-NULL ``feat_arc_proc`` callback.
 
 Known Issues
 ------------
diff --git a/lib/graph/graph.c b/lib/graph/graph.c
index dff8e690a8..a764c5824e 100644
--- a/lib/graph/graph.c
+++ b/lib/graph/graph.c
@@ -455,6 +455,7 @@ rte_graph_create(const char *name, struct rte_graph_param *prm)
 	graph->parent_id = RTE_GRAPH_ID_INVALID;
 	graph->lcore_id = RTE_MAX_LCORE;
 	graph->num_pkt_to_capture = prm->num_pkt_to_capture;
+	graph->feature_arc_enabled = prm->feature_arc_enable;
 	if (prm->pcap_filename)
 		rte_strscpy(graph->pcap_filename, prm->pcap_filename, RTE_GRAPH_PCAP_FILE_SZ);
 
diff --git a/lib/graph/graph_populate.c b/lib/graph/graph_populate.c
index ed596a7711..5d8aa7b903 100644
--- a/lib/graph/graph_populate.c
+++ b/lib/graph/graph_populate.c
@@ -79,8 +79,13 @@ graph_nodes_populate(struct graph *_graph)
 		if (graph_pcap_is_enable()) {
 			node->process = graph_pcap_dispatch;
 			node->original_process = graph_node->node->process;
-		} else
+			if (_graph->feature_arc_enabled && graph_node->node->feat_arc_proc)
+				node->original_process = graph_node->node->feat_arc_proc;
+		} else {
 			node->process = graph_node->node->process;
+			if (_graph->feature_arc_enabled && graph_node->node->feat_arc_proc)
+				node->process = graph_node->node->feat_arc_proc;
+		}
 		memcpy(node->name, graph_node->node->name, RTE_GRAPH_NAMESIZE);
 		pid = graph_node->node->parent_id;
 		if (pid != RTE_NODE_ID_INVALID) { /* Cloned node */
diff --git a/lib/graph/graph_private.h b/lib/graph/graph_private.h
index d557d55f2d..58ba0abeff 100644
--- a/lib/graph/graph_private.h
+++ b/lib/graph/graph_private.h
@@ -56,6 +56,7 @@ struct node {
 	unsigned int lcore_id;
 	/**< Node runs on the Lcore ID used for mcore dispatch model. */
 	rte_node_process_t process;   /**< Node process function. */
+	rte_node_process_t feat_arc_proc; /**< Node feature-arc process function. */
 	rte_node_init_t init;         /**< Node init function. */
 	rte_node_fini_t fini;	      /**< Node fini function. */
 	rte_node_t id;		      /**< Allocated identifier for the node. */
@@ -126,6 +127,8 @@ struct graph {
 	/**< Number of packets to be captured per core. */
 	char pcap_filename[RTE_GRAPH_PCAP_FILE_SZ];
 	/**< pcap file name/path. */
+	uint8_t feature_arc_enabled;
+	/**< Feature arc enable status of the graph. */
 	STAILQ_HEAD(gnode_list, graph_node) node_list;
 	/**< Nodes in a graph. */
 };
diff --git a/lib/graph/node.c b/lib/graph/node.c
index 99a9622779..d8fd273543 100644
--- a/lib/graph/node.c
+++ b/lib/graph/node.c
@@ -90,6 +90,7 @@ __rte_node_register(const struct rte_node_register *reg)
 		goto free;
 	node->flags = reg->flags;
 	node->process = reg->process;
+	node->feat_arc_proc = reg->feat_arc_proc;
 	node->init = reg->init;
 	node->fini = reg->fini;
 	node->nb_edges = reg->nb_edges;
@@ -137,6 +138,7 @@ node_clone(struct node *node, const char *name)
 	/* Clone the source node */
 	reg->flags = node->flags;
 	reg->process = node->process;
+	reg->feat_arc_proc = node->feat_arc_proc;
 	reg->init = node->init;
 	reg->fini = node->fini;
 	reg->nb_edges = node->nb_edges;
diff --git a/lib/graph/rte_graph.h b/lib/graph/rte_graph.h
index ecfec2068a..f07272b308 100644
--- a/lib/graph/rte_graph.h
+++ b/lib/graph/rte_graph.h
@@ -172,6 +172,8 @@ struct rte_graph_param {
 			uint32_t mp_capacity; /**< Capacity of memory pool for dispatch model. */
 		} dispatch;
 	};
+
+	bool feature_arc_enable; /**< Enable Graph feature arc. */
 };
 
 /**
@@ -470,6 +472,7 @@ struct rte_node_register {
 	uint64_t flags;		      /**< Node configuration flag. */
 #define RTE_NODE_SOURCE_F (1ULL << 0) /**< Node type is source. */
 	rte_node_process_t process; /**< Node process function. */
+	rte_node_process_t feat_arc_proc; /**< Node feature-arc specific process function. */
 	rte_node_init_t init;       /**< Node init function. */
 	rte_node_fini_t fini;       /**< Node fini function. */
 	rte_node_t id;		    /**< Node Identifier. */
-- 
2.43.0



* Re: [PATCH v2] doc: announce single-event enqueue/dequeue ABI change
  @ 2024-10-14 14:40  4%     ` Thomas Monjalon
  2024-10-14 14:44  4%     ` David Marchand
  1 sibling, 0 replies; 169+ results
From: Thomas Monjalon @ 2024-10-14 14:40 UTC (permalink / raw)
  To: Mattias Rönnblom, Jerin Jacob
  Cc: David Marchand, Mattias Rönnblom, jerinj, dev,
	Pavan Nikhilesh, Timothy McDaniel, Hemant Agrawal, Sachin Saxena,
	Harry van Haaren, Liang Ma, Peter Mccarthy

14/10/2024 09:18, Jerin Jacob:
> On Sun, Oct 13, 2024 at 12:27 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
> >
> > On 2024-10-11 16:42, David Marchand wrote:
> > > On Wed, Jul 5, 2023 at 1:18 PM Mattias Rönnblom
> > > <mattias.ronnblom@ericsson.com> wrote:
> > >>
> > >> Announce the removal of the single-event enqueue and dequeue
> > >> operations from the eventdev ABI.
> > >>
> > >> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> > >>
> > >> ---
> > >> PATCH v2: Fix commit subject prefix.
> > >> ---
> > >>   doc/guides/rel_notes/deprecation.rst | 8 ++++++++
> > >>   1 file changed, 8 insertions(+)
> > >>
> > >> diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
> > >> index 66431789b0..ca192d838d 100644
> > >> --- a/doc/guides/rel_notes/deprecation.rst
> > >> +++ b/doc/guides/rel_notes/deprecation.rst
> > >> @@ -153,3 +153,11 @@ Deprecation Notices
> > >>     The new port library API (functions rte_swx_port_*)
> > >>     will gradually transition from experimental to stable status
> > >>     starting with DPDK 23.07 release.
> > >> +
> > >> +* eventdev: The single-event (non-burst) enqueue and dequeue
> > >> +  operations, used by static inline burst enqueue and dequeue
> > >> +  functions in <rte_eventdev.h>, will be removed in DPDK 23.11. This
> > >> +  simplification includes changing the layout and potentially also the
> > >> +  size of the public rte_event_fp_ops struct, breaking the ABI. Since
> > >> +  these functions are not called directly by the application, the API
> > >> +  remains unaffected.
> > >
> > > Looks like it was missed in 23.11, can/should we finish this cleanup in 24.11?
> > >
> > >
> >
> > Yes, sure. Jerin, should I submit a patch?
> 
> David/Thomas, We should be OK to take as rc2. Right? If so, OK.

No problem for -rc2




* Re: [PATCH v2] doc: announce single-event enqueue/dequeue ABI change
    2024-10-14 14:40  4%     ` Thomas Monjalon
@ 2024-10-14 14:44  4%     ` David Marchand
  1 sibling, 0 replies; 169+ results
From: David Marchand @ 2024-10-14 14:44 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Mattias Rönnblom, Mattias Rönnblom, jerinj,
	Thomas Monjalon, dev, Pavan Nikhilesh, Timothy McDaniel,
	Hemant Agrawal, Sachin Saxena, Harry van Haaren, Liang Ma,
	Peter Mccarthy

On Mon, Oct 14, 2024 at 9:19 AM Jerin Jacob <jerinjacobk@gmail.com> wrote:
> On Sun, Oct 13, 2024 at 12:27 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
> >
> > On 2024-10-11 16:42, David Marchand wrote:
> > > On Wed, Jul 5, 2023 at 1:18 PM Mattias Rönnblom
> > > <mattias.ronnblom@ericsson.com> wrote:
> > >>
> > >> Announce the removal of the single-event enqueue and dequeue
> > >> operations from the eventdev ABI.
> > >>
> > >> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> > >>
> > >> ---
> > >> PATCH v2: Fix commit subject prefix.
> > >> ---
> > >>   doc/guides/rel_notes/deprecation.rst | 8 ++++++++
> > >>   1 file changed, 8 insertions(+)
> > >>
> > >> diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
> > >> index 66431789b0..ca192d838d 100644
> > >> --- a/doc/guides/rel_notes/deprecation.rst
> > >> +++ b/doc/guides/rel_notes/deprecation.rst
> > >> @@ -153,3 +153,11 @@ Deprecation Notices
> > >>     The new port library API (functions rte_swx_port_*)
> > >>     will gradually transition from experimental to stable status
> > >>     starting with DPDK 23.07 release.
> > >> +
> > >> +* eventdev: The single-event (non-burst) enqueue and dequeue
> > >> +  operations, used by static inline burst enqueue and dequeue
> > >> +  functions in <rte_eventdev.h>, will be removed in DPDK 23.11. This
> > >> +  simplification includes changing the layout and potentially also the
> > >> +  size of the public rte_event_fp_ops struct, breaking the ABI. Since
> > >> +  these functions are not called directly by the application, the API
> > >> +  remains unaffected.
> > >
> > > Looks like it was missed in 23.11, can/should we finish this cleanup in 24.11?
> > >
> > >
> >
> > Yes, sure. Jerin, should I submit a patch?
>
> David/Thomas, We should be OK to take as rc2. Right? If so, OK.

Afaiu, this cleanup involves removing driver facing API only, and some
driver cleanups.
It should have no impact on application API.

If so, it fits in rc2.


-- 
David Marchand



* Re: [PATCH v10 0/2] power: introduce PM QoS interface
  @ 2024-10-14 15:27  0%   ` Stephen Hemminger
  2024-10-15  9:30  0%     ` lihuisong (C)
  0 siblings, 1 reply; 169+ results
From: Stephen Hemminger @ 2024-10-14 15:27 UTC (permalink / raw)
  To: Huisong Li
  Cc: dev, mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, david.marchand, fengchengwen, liuyonglong

On Thu, 12 Sep 2024 10:38:10 +0800
Huisong Li <lihuisong@huawei.com> wrote:

> The deeper the idle state, the lower the power consumption, but the longer
> the resume time. Some services are delay sensitive and expect a low
> resume time, such as the interrupt packet receiving mode.
> 
> And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
> interface is used to set and get the resume latency limit on the cpuX for
> userspace. Please see the description in kernel document[1].
> Each cpuidle governor in Linux select which idle state to enter based on
> this CPU resume latency in their idle task.
> 
> The per-CPU PM QoS API can be used to control this CPU's idle state
> selection and restrict it to the shallowest idle state, lowering the
> delay after sleep, by setting a strict resume latency (zero value).
> 
> [1] https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
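
Concretely, per the description above, restricting cpu0 to the shallowest
idle state boils down to one sysfs write (a sketch; path taken from [1],
error handling elided):

#include <stdio.h>

static int
set_cpu0_resume_latency_zero(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/power/pm_qos_resume_latency_us", "w");

	if (f == NULL)
		return -1;
	fprintf(f, "0\n"); /* strict (zero) limit: shallowest idle state only */
	return fclose(f);
}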


This is not a direct critique of this patch.
The power library should have been designed to take a single configuration
structure specifying CPU frequencies, wake-up latency, and all the other
parameters exposed by the kernel. Then there would be a simple API:
rte_power_config_set() and rte_power_config_get().
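
Sketched as a hypothetical interface (these symbols do not exist in DPDK
today; the field set is illustrative):

#include <stdint.h>

struct rte_power_config {
	uint32_t min_freq_khz;      /* CPU frequency floor */
	uint32_t max_freq_khz;      /* CPU frequency ceiling */
	uint32_t resume_latency_us; /* per-CPU PM QoS resume latency limit */
};

int rte_power_config_set(unsigned int lcore_id,
			 const struct rte_power_config *cfg);
int rte_power_config_get(unsigned int lcore_id,
			 struct rte_power_config *cfg);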



* [PATCH v6 1/3] graph: add support for node specific xstats
  @ 2024-10-14 16:10  3%   ` pbhagavatula
    1 sibling, 0 replies; 169+ results
From: pbhagavatula @ 2024-10-14 16:10 UTC (permalink / raw)
  To: jerinj, ndabilpuram, kirankumark, zhirun.yan, rjarry,
	david.marchand, Zhirun Yan
  Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add the ability for nodes to advertise xstat counters
during registration and increment them in the fast path.
Add support for retrieving/printing stats for node
specific xstats using rte_graph_cluster_stats_get().
Add the `rte_node_xstat_increment` API to increment node
specific xstat counters.
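
A minimal sketch of the intended usage (node and counter names are
illustrative, not from this patch):

#include <rte_graph.h>
#include <rte_graph_worker.h>

/* Advertise one xstat at registration time. */
static struct rte_node_xstats my_node_xstats = {
	.nb_xstats = 1,
	.xstat_desc = {[0] = "my_node_drops"},
};

static uint16_t
my_node_process(struct rte_graph *graph, struct rte_node *node,
		void **objs, uint16_t nb_objs)
{
	uint16_t dropped = 0;

	(void)graph; (void)objs;
	/* ... process objs, counting drops into dropped ... */
	rte_node_xstat_increment(node, 0, dropped); /* id 0 == "my_node_drops" */
	return nb_objs;
}

static struct rte_node_register my_node = {
	.name = "my_node",
	.process = my_node_process,
	.xstats = &my_node_xstats,
};
RTE_NODE_REGISTER(my_node);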

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Kiran Kumar K <kirankumark@marvell.com>
Reviewed-by: Robin Jarry <rjarry@redhat.com>
---
 doc/guides/prog_guide/graph_lib.rst    | 22 +++++--
 doc/guides/rel_notes/deprecation.rst   |  6 --
 doc/guides/rel_notes/release_24_11.rst |  8 +++
 lib/graph/graph_populate.c             | 20 ++++++-
 lib/graph/graph_private.h              |  3 +
 lib/graph/graph_stats.c                | 79 +++++++++++++++++++++++++-
 lib/graph/node.c                       | 37 +++++++++++-
 lib/graph/rte_graph.h                  | 11 ++++
 lib/graph/rte_graph_worker_common.h    | 23 ++++++++
 lib/graph/version.map                  |  7 +++
 10 files changed, 201 insertions(+), 15 deletions(-)

diff --git a/doc/guides/prog_guide/graph_lib.rst b/doc/guides/prog_guide/graph_lib.rst
index ad09bdfe26..4d9ae84ada 100644
--- a/doc/guides/prog_guide/graph_lib.rst
+++ b/doc/guides/prog_guide/graph_lib.rst
@@ -21,6 +21,7 @@ Features of the Graph library are:
 - Nodes as plugins.
 - Support for out of tree nodes.
 - Inbuilt nodes for packet processing.
+- Node specific xstat counts.
 - Multi-process support.
 - Low overhead graph walk and node enqueue.
 - Low overhead statistics collection infrastructure.
@@ -124,6 +125,18 @@ Source nodes are static nodes created using ``RTE_NODE_REGISTER`` by passing
 While performing the graph walk, the ``process()`` function of all the source
 nodes will be called first. So that these nodes can be used as input nodes for a graph.

+nb_xstats:
+^^^^^^^^^^
+
+The number of xstats that this node can report. The ``xstat_desc[]`` stores the xstat
+descriptions which will later be propagated to stats.
+
+xstat_desc[]:
+^^^^^^^^^^^^^
+
+The dynamic array to store the xstat descriptions that will be reported by this
+node.
+
 Node creation and registration
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * Node implementer creates the node by implementing ops and attributes of
@@ -141,13 +154,13 @@ Link the Nodes to create the graph topology
    Topology after linking the nodes

 Once nodes are available to the program, Application or node public API
-functions can links them together to create a complex packet processing graph.
+functions can link them together to create a complex packet processing graph.

 There are multiple different types of strategies to link the nodes.

 Method (a):
 ^^^^^^^^^^^
-Provide the ``next_nodes[]`` at the node registration time. See  ``struct rte_node_register::nb_edges``.
+Provide the ``next_nodes[]`` at the node registration time. See ``struct rte_node_register::nb_edges``.
 This is a use case to address the static node scheme where one knows upfront the
 ``next_nodes[]`` of the node.

@@ -385,8 +398,9 @@ Understanding the memory layout helps to debug the graph library and
 improve the performance if needed.

 Graph object consists of a header, circular buffer to store the pending
-stream when walking over the graph, and variable-length memory to store
-the ``rte_node`` objects.
+stream when walking over the graph, variable-length memory to store
+the ``rte_node`` objects, and variable-length memory to store the xstat
+reported by each ``rte_node``.

 The graph_nodes_mem_create() creates and populate this memory. The functions
 such as ``rte_graph_walk()`` and ``rte_node_enqueue_*`` use this memory
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 1535ea7abf..8f1d43e18e 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -196,9 +196,3 @@ Deprecation Notices
   will be deprecated and subsequently removed in DPDK 24.11 release.
   Before this, the new port library API (functions rte_swx_port_*)
   will gradually transition from experimental to stable status.
-
-* graph: The graph library data structures will be modified
-  to support node specific errors.
-  The structures ``rte_node``, ``rte_node_register``
-  and ``rte_graph_cluster_node_stats`` will be extended
-  to include node error counters and error description.
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 5a423af130..70c88037b4 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -172,6 +172,10 @@ New Features

   * Added independent enqueue feature.

+* **Added node specific xstats for rte_graph.**
+
+  * Added the ability for a node to advertise and update multiple xstat counters,
+    which can be retrieved using ``rte_graph_cluster_stats_get()``.

 Removed Items
 -------------
@@ -254,6 +258,10 @@ ABI Changes

 * eventdev: Added ``preschedule_type`` field to ``rte_event_dev_config`` structure.

+* graph: To accommodate node specific xstats counters, added ``xstat_cntrs``,
+  ``xstat_desc`` and ``xstat_count`` to ``rte_graph_cluster_node_stats``,
+  added the new structure ``rte_node_xstats`` to ``rte_node_register``, and
+  added ``xstat_off`` to ``rte_node``.

 Known Issues
 ------------
diff --git a/lib/graph/graph_populate.c b/lib/graph/graph_populate.c
index ed596a7711..eaa48f1a7b 100644
--- a/lib/graph/graph_populate.c
+++ b/lib/graph/graph_populate.c
@@ -39,6 +39,15 @@ graph_fp_mem_calc_size(struct graph *graph)
 		/* Pointer to next nodes(edges) */
 		sz += sizeof(struct rte_node *) * graph_node->node->nb_edges;
 	}
+	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+	graph->xstats_start = sz;
+	/* For 0..N node objects with xstats */
+	STAILQ_FOREACH(graph_node, &graph->node_list, next) {
+		if (graph_node->node->xstats == NULL)
+			continue;
+		sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+		sz += sizeof(uint64_t) * graph_node->node->xstats->nb_xstats;
+	}

 	graph->mem_sz = sz;
 	return sz;
@@ -64,6 +73,7 @@ graph_header_popluate(struct graph *_graph)
 static void
 graph_nodes_populate(struct graph *_graph)
 {
+	rte_graph_off_t xstat_off = _graph->xstats_start;
 	rte_graph_off_t off = _graph->nodes_start;
 	struct rte_graph *graph = _graph->graph;
 	struct graph_node *graph_node;
@@ -99,6 +109,12 @@ graph_nodes_populate(struct graph *_graph)
 						     ->adjacency_list[count]
 						     ->node->name[0];

+		if (graph_node->node->xstats != NULL) {
+			node->xstat_off = xstat_off - off;
+			xstat_off += sizeof(uint64_t) * graph_node->node->xstats->nb_xstats;
+			xstat_off = RTE_ALIGN(xstat_off, RTE_CACHE_LINE_SIZE);
+		}
+
 		off += sizeof(struct rte_node *) * nb_edges;
 		off = RTE_ALIGN(off, RTE_CACHE_LINE_SIZE);
 		node->next = off;
@@ -158,7 +174,7 @@ graph_node_nexts_populate(struct graph *_graph)
 }

 static int
-graph_src_nodes_populate(struct graph *_graph)
+graph_src_nodes_offset_populate(struct graph *_graph)
 {
 	struct rte_graph *graph = _graph->graph;
 	struct graph_node *graph_node;
@@ -193,7 +209,7 @@ graph_fp_mem_populate(struct graph *graph)
 		graph_pcap_init(graph);
 	graph_nodes_populate(graph);
 	rc = graph_node_nexts_populate(graph);
-	rc |= graph_src_nodes_populate(graph);
+	rc |= graph_src_nodes_offset_populate(graph);

 	return rc;
 }
diff --git a/lib/graph/graph_private.h b/lib/graph/graph_private.h
index d557d55f2d..da48d73587 100644
--- a/lib/graph/graph_private.h
+++ b/lib/graph/graph_private.h
@@ -61,6 +61,7 @@ struct node {
 	rte_node_t id;		      /**< Allocated identifier for the node. */
 	rte_node_t parent_id;	      /**< Parent node identifier. */
 	rte_edge_t nb_edges;	      /**< Number of edges from this node. */
+	struct rte_node_xstats *xstats;	      /**< Node specific xstats. */
 	char next_nodes[][RTE_NODE_NAMESIZE]; /**< Names of next nodes. */
 };

@@ -102,6 +103,8 @@ struct graph {
 	/**< Memzone to store graph data. */
 	rte_graph_off_t nodes_start;
 	/**< Node memory start offset in graph reel. */
+	rte_graph_off_t xstats_start;
+	/**< Node xstats memory start offset in graph reel. */
 	rte_node_t src_node_count;
 	/**< Number of source nodes in a graph. */
 	struct rte_graph *graph;
diff --git a/lib/graph/graph_stats.c b/lib/graph/graph_stats.c
index d71451a17b..a34b4a8200 100644
--- a/lib/graph/graph_stats.c
+++ b/lib/graph/graph_stats.c
@@ -121,6 +121,24 @@ print_node(FILE *f, const struct rte_graph_cluster_node_stats *stat, bool dispat
 	}
 }

+static inline void
+print_xstat(FILE *f, const struct rte_graph_cluster_node_stats *stat, bool dispatch)
+{
+	int i;
+
+	if (dispatch) {
+		for (i = 0; i < stat->xstat_cntrs; i++)
+			fprintf(f,
+				"|\t%-24s|%15s|%-15" PRIu64 "|%15s|%15s|%15s|%15s|%15s|%11.4s|\n",
+				stat->xstat_desc[i], "", stat->xstat_count[i], "", "", "", "", "",
+				"");
+	} else {
+		for (i = 0; i < stat->xstat_cntrs; i++)
+			fprintf(f, "|\t%-24s|%15s|%-15" PRIu64 "|%15s|%15.3s|%15.6s|%11.4s|\n",
+				stat->xstat_desc[i], "", stat->xstat_count[i], "", "", "", "");
+	}
+}
+
 static int
 graph_cluster_stats_cb(bool dispatch, bool is_first, bool is_last, void *cookie,
 		       const struct rte_graph_cluster_node_stats *stat)
@@ -129,8 +147,11 @@ graph_cluster_stats_cb(bool dispatch, bool is_first, bool is_last, void *cookie,

 	if (unlikely(is_first))
 		print_banner(f, dispatch);
-	if (stat->objs)
+	if (stat->objs) {
 		print_node(f, stat, dispatch);
+		if (stat->xstat_cntrs)
+			print_xstat(f, stat, dispatch);
+	}
 	if (unlikely(is_last)) {
 		if (dispatch)
 			boarder_model_dispatch();
@@ -203,6 +224,7 @@ stats_mem_populate(struct rte_graph_cluster_stats **stats_in,
 	struct cluster_node *cluster;
 	struct rte_node *node;
 	rte_node_t count;
+	uint8_t i;

 	cluster = stats->clusters;

@@ -240,6 +262,37 @@ stats_mem_populate(struct rte_graph_cluster_stats **stats_in,
 		SET_ERR_JMP(ENOENT, free, "Failed to find node %s in graph %s",
 			    graph_node->node->name, graph->name);
 	cluster->nodes[cluster->nb_nodes++] = node;
+	if (graph_node->node->xstats) {
+		cluster->stat.xstat_cntrs = graph_node->node->xstats->nb_xstats;
+		cluster->stat.xstat_count = rte_zmalloc_socket(
+			NULL, sizeof(uint64_t) * graph_node->node->xstats->nb_xstats,
+			RTE_CACHE_LINE_SIZE, stats->socket_id);
+		if (cluster->stat.xstat_count == NULL)
+			SET_ERR_JMP(ENOMEM, free, "Failed to allocate memory node %s graph %s",
+				    graph_node->node->name, graph->name);
+
+		cluster->stat.xstat_desc = rte_zmalloc_socket(
+			NULL,
+			RTE_NODE_XSTAT_DESC_SIZE * graph_node->node->xstats->nb_xstats,
+			RTE_CACHE_LINE_SIZE, stats->socket_id);
+		if (cluster->stat.xstat_desc == NULL) {
+			rte_free(cluster->stat.xstat_count);
+			SET_ERR_JMP(ENOMEM, free, "Failed to allocate memory node %s graph %s",
+				    graph_node->node->name, graph->name);
+		}
+
+		for (i = 0; i < cluster->stat.xstat_cntrs; i++) {
+			if (rte_strscpy(cluster->stat.xstat_desc[i],
+					graph_node->node->xstats->xstat_desc[i],
+					RTE_NODE_XSTAT_DESC_SIZE) < 0) {
+				rte_free(cluster->stat.xstat_count);
+				rte_free(cluster->stat.xstat_desc);
+				SET_ERR_JMP(E2BIG, free,
+					    "Error description overflow node %s graph %s",
+					    graph_node->node->name, graph->name);
+			}
+		}
+	}

 	stats->sz += stats->cluster_node_size;
 	stats->max_nodes++;
@@ -388,6 +441,18 @@ rte_graph_cluster_stats_create(const struct rte_graph_cluster_stats_param *prm)
 void
 rte_graph_cluster_stats_destroy(struct rte_graph_cluster_stats *stat)
 {
+	struct cluster_node *cluster;
+	rte_node_t count;
+
+	cluster = stat->clusters;
+	for (count = 0; count < stat->max_nodes; count++) {
+		if (cluster->stat.xstat_cntrs) {
+			rte_free(cluster->stat.xstat_count);
+			rte_free(cluster->stat.xstat_desc);
+		}
+
+		cluster = RTE_PTR_ADD(cluster, stat->cluster_node_size);
+	}
 	return rte_free(stat);
 }

@@ -399,7 +464,10 @@ cluster_node_arregate_stats(struct cluster_node *cluster, bool dispatch)
 	uint64_t sched_objs = 0, sched_fail = 0;
 	struct rte_node *node;
 	rte_node_t count;
+	uint64_t *xstat;
+	uint8_t i;

+	memset(stat->xstat_count, 0, sizeof(uint64_t) * stat->xstat_cntrs);
 	for (count = 0; count < cluster->nb_nodes; count++) {
 		node = cluster->nodes[count];

@@ -412,6 +480,12 @@ cluster_node_arregate_stats(struct cluster_node *cluster, bool dispatch)
 		objs += node->total_objs;
 		cycles += node->total_cycles;
 		realloc_count += node->realloc_count;
+
+		if (node->xstat_off == 0)
+			continue;
+		xstat = RTE_PTR_ADD(node, node->xstat_off);
+		for (i = 0; i < stat->xstat_cntrs; i++)
+			stat->xstat_count[i] += xstat[i];
 	}

 	stat->calls = calls;
@@ -464,6 +538,7 @@ rte_graph_cluster_stats_reset(struct rte_graph_cluster_stats *stat)
 {
 	struct cluster_node *cluster;
 	rte_node_t count;
+	uint8_t i;

 	cluster = stat->clusters;

@@ -479,6 +554,8 @@ rte_graph_cluster_stats_reset(struct rte_graph_cluster_stats *stat)
 		node->prev_objs = 0;
 		node->prev_cycles = 0;
 		node->realloc_count = 0;
+		for (i = 0; i < node->xstat_cntrs; i++)
+			node->xstat_count[i] = 0;
 		cluster = RTE_PTR_ADD(cluster, stat->cluster_node_size);
 	}
 }
diff --git a/lib/graph/node.c b/lib/graph/node.c
index 99a9622779..2e20d5811c 100644
--- a/lib/graph/node.c
+++ b/lib/graph/node.c
@@ -85,9 +85,24 @@ __rte_node_register(const struct rte_node_register *reg)
 		goto fail;
 	}

+	if (reg->xstats) {
+		sz = sizeof(*reg->xstats) + (reg->xstats->nb_xstats * RTE_NODE_XSTAT_DESC_SIZE);
+		node->xstats = calloc(1, sz);
+		if (node->xstats == NULL) {
+			rte_errno = ENOMEM;
+			goto free;
+		}
+
+		node->xstats->nb_xstats = reg->xstats->nb_xstats;
+		for (i = 0; i < reg->xstats->nb_xstats; i++)
+			if (rte_strscpy(node->xstats->xstat_desc[i], reg->xstats->xstat_desc[i],
+					RTE_NODE_XSTAT_DESC_SIZE) < 0)
+				goto free_xstat;
+	}
+
 	/* Initialize the node */
 	if (rte_strscpy(node->name, reg->name, RTE_NODE_NAMESIZE) < 0)
-		goto free;
+		goto free_xstat;
 	node->flags = reg->flags;
 	node->process = reg->process;
 	node->init = reg->init;
@@ -97,7 +112,7 @@ __rte_node_register(const struct rte_node_register *reg)
 	for (i = 0; i < reg->nb_edges; i++) {
 		if (rte_strscpy(node->next_nodes[i], reg->next_nodes[i],
 				RTE_NODE_NAMESIZE) < 0)
-			goto free;
+			goto free_xstat;
 	}

 	node->lcore_id = RTE_MAX_LCORE;
@@ -108,6 +123,8 @@ __rte_node_register(const struct rte_node_register *reg)
 	graph_spinlock_unlock();

 	return node->id;
+free_xstat:
+	free(node->xstats);
 free:
 	free(node);
 fail:
@@ -134,6 +151,20 @@ node_clone(struct node *node, const char *name)
 		goto fail;
 	}

+	if (node->xstats) {
+		reg->xstats = calloc(1, sizeof(*node->xstats) + (node->xstats->nb_xstats *
+								 RTE_NODE_XSTAT_DESC_SIZE));
+		if (reg->xstats == NULL) {
+			rte_errno = ENOMEM;
+			goto fail;
+		}
+
+		for (i = 0; i < node->xstats->nb_xstats; i++)
+			if (rte_strscpy(reg->xstats->xstat_desc[i], node->xstats->xstat_desc[i],
+					RTE_NODE_XSTAT_DESC_SIZE) < 0)
+				goto free_xstat;
+	}
+
 	/* Clone the source node */
 	reg->flags = node->flags;
 	reg->process = node->process;
@@ -150,6 +181,8 @@ node_clone(struct node *node, const char *name)
 		goto free;

 	rc = __rte_node_register(reg);
+free_xstat:
+	free(reg->xstats);
 free:
 	free(reg);
 fail:
diff --git a/lib/graph/rte_graph.h b/lib/graph/rte_graph.h
index ecfec2068a..9c708a150d 100644
--- a/lib/graph/rte_graph.h
+++ b/lib/graph/rte_graph.h
@@ -29,6 +29,7 @@ extern "C" {

 #define RTE_GRAPH_NAMESIZE 64 /**< Max length of graph name. */
 #define RTE_NODE_NAMESIZE 64  /**< Max length of node name. */
+#define RTE_NODE_XSTAT_DESC_SIZE 64  /**< Max length of node xstat description. */
 #define RTE_GRAPH_PCAP_FILE_SZ 64 /**< Max length of pcap file name. */
 #define RTE_GRAPH_OFF_INVALID UINT32_MAX /**< Invalid graph offset. */
 #define RTE_NODE_ID_INVALID UINT32_MAX   /**< Invalid node id. */
@@ -222,6 +223,10 @@ struct __rte_cache_aligned rte_graph_cluster_node_stats {

 	uint64_t realloc_count; /**< Realloc count. */

+	uint8_t xstat_cntrs;			      /**< Number of Node xstat counters. */
+	char (*xstat_desc)[RTE_NODE_XSTAT_DESC_SIZE]; /**< Names of the Node xstat counters. */
+	uint64_t *xstat_count;			      /**< Total stat count per each xstat. */
+
 	rte_node_t id;	/**< Node identifier of stats. */
 	uint64_t hz;	/**< Cycles per seconds. */
 	char name[RTE_NODE_NAMESIZE];	/**< Name of the node. */
@@ -460,6 +465,11 @@ void rte_graph_cluster_stats_get(struct rte_graph_cluster_stats *stat,
  */
 void rte_graph_cluster_stats_reset(struct rte_graph_cluster_stats *stat);

+struct rte_node_xstats {
+	uint16_t nb_xstats;			     /**< Number of xstats. */
+	char xstat_desc[][RTE_NODE_XSTAT_DESC_SIZE]; /**< Names of xstats. */
+};
+
 /**
  * Structure defines the node registration parameters.
  *
@@ -472,6 +482,7 @@ struct rte_node_register {
 	rte_node_process_t process; /**< Node process function. */
 	rte_node_init_t init;       /**< Node init function. */
 	rte_node_fini_t fini;       /**< Node fini function. */
+	struct rte_node_xstats *xstats; /**< Node specific xstats. */
 	rte_node_t id;		    /**< Node Identifier. */
 	rte_node_t parent_id;       /**< Identifier of parent node. */
 	rte_edge_t nb_edges;        /**< Number of edges from this node. */
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index 8d8956fddd..c18b58cd32 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -112,6 +112,7 @@ struct __rte_cache_aligned rte_node {
 			uint64_t total_sched_fail; /**< Number of scheduled failure. */
 		} dispatch;
 	};
+	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
 	/* Fast path area  */
 	__extension__ struct __rte_cache_aligned {
 #define RTE_NODE_CTX_SZ 16
@@ -584,6 +585,28 @@ uint8_t rte_graph_worker_model_no_check_get(struct rte_graph *graph)
 	return graph->model;
 }

+/**
+ * Increment Node xstat count.
+ *
+ * Increment the count of an xstat for a given node.
+ *
+ * @param node
+ *   Pointer to the node.
+ * @param xstat_id
+ *   xstat ID.
+ * @param value
+ *   Value to increment.
+ */
+__rte_experimental
+static inline void
+rte_node_xstat_increment(struct rte_node *node, uint16_t xstat_id, uint64_t value)
+{
+	if (rte_graph_has_stats_feature()) {
+		uint64_t *xstat = (uint64_t *)RTE_PTR_ADD(node, node->xstat_off);
+		xstat[xstat_id] += value;
+	}
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/graph/version.map b/lib/graph/version.map
index 2c83425ddc..44fadc00fd 100644
--- a/lib/graph/version.map
+++ b/lib/graph/version.map
@@ -52,3 +52,10 @@ DPDK_25 {

 	local: *;
 };
+
+EXPERIMENTAL {
+	global:
+
+	# added in 24.11
+	rte_node_xstat_increment;
+};
--
2.25.1



* [PATCH v7 1/3] graph: add support for node specific xstats
  @ 2024-10-15  5:42  3%     ` pbhagavatula
  0 siblings, 0 replies; 169+ results
From: pbhagavatula @ 2024-10-15  5:42 UTC (permalink / raw)
  To: jerinj, ndabilpuram, kirankumark, zhirun.yan, rjarry,
	david.marchand, Zhirun Yan
  Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add the ability for nodes to advertise xstat counters
during registration and increment them in the fast path.
Add support for retrieving/printing stats for node
specific xstats using rte_graph_cluster_stats_get().
Add the `rte_node_xstat_increment` API to increment node
specific xstat counters.
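
A short sketch of reading these counters back through the existing stats
callback (callback and pattern names are illustrative):

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#include <rte_graph.h>
#include <rte_memory.h>

/* Print every xstat aggregated for a cluster of nodes. */
static int
dump_xstats(bool is_first, bool is_last, void *cookie,
	    const struct rte_graph_cluster_node_stats *st)
{
	uint8_t i;

	(void)is_first; (void)is_last; (void)cookie;
	for (i = 0; i < st->xstat_cntrs; i++)
		printf("%s: %s = %" PRIu64 "\n",
		       st->name, st->xstat_desc[i], st->xstat_count[i]);
	return 0;
}

static struct rte_graph_cluster_stats *
create_stats(void)
{
	static const char *patterns[] = {"worker_*"};
	struct rte_graph_cluster_stats_param prm = {
		.socket_id = SOCKET_ID_ANY,
		.fn = dump_xstats,
		.nb_graph_patterns = 1,
		.graph_patterns = patterns,
	};

	return rte_graph_cluster_stats_create(&prm);
}

Calling rte_graph_cluster_stats_get() on the returned handle then invokes
the callback, xstat rows included.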

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Kiran Kumar K <kirankumark@marvell.com>
Reviewed-by: Robin Jarry <rjarry@redhat.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
---
 doc/guides/prog_guide/graph_lib.rst    | 22 +++++--
 doc/guides/rel_notes/deprecation.rst   |  6 --
 doc/guides/rel_notes/release_24_11.rst |  8 +++
 lib/graph/graph_populate.c             | 20 ++++++-
 lib/graph/graph_private.h              |  3 +
 lib/graph/graph_stats.c                | 79 +++++++++++++++++++++++++-
 lib/graph/node.c                       | 37 +++++++++++-
 lib/graph/rte_graph.h                  | 15 +++++
 lib/graph/rte_graph_worker_common.h    | 23 ++++++++
 lib/graph/version.map                  |  7 +++
 10 files changed, 205 insertions(+), 15 deletions(-)

diff --git a/doc/guides/prog_guide/graph_lib.rst b/doc/guides/prog_guide/graph_lib.rst
index ad09bdfe26..4d9ae84ada 100644
--- a/doc/guides/prog_guide/graph_lib.rst
+++ b/doc/guides/prog_guide/graph_lib.rst
@@ -21,6 +21,7 @@ Features of the Graph library are:
 - Nodes as plugins.
 - Support for out of tree nodes.
 - Inbuilt nodes for packet processing.
+- Node specific xstat counts.
 - Multi-process support.
 - Low overhead graph walk and node enqueue.
 - Low overhead statistics collection infrastructure.
@@ -124,6 +125,18 @@ Source nodes are static nodes created using ``RTE_NODE_REGISTER`` by passing
 While performing the graph walk, the ``process()`` function of all the source
 nodes will be called first. So that these nodes can be used as input nodes for a graph.
 
+nb_xstats:
+^^^^^^^^^^
+
+The number of xstats that this node can report. The ``xstat_desc[]`` stores the xstat
+descriptions which will later be propagated to stats.
+
+xstat_desc[]:
+^^^^^^^^^^^^^
+
+The dynamic array to store the xstat descriptions that will be reported by this
+node.
+
 Node creation and registration
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * Node implementer creates the node by implementing ops and attributes of
@@ -141,13 +154,13 @@ Link the Nodes to create the graph topology
    Topology after linking the nodes
 
 Once nodes are available to the program, Application or node public API
-functions can links them together to create a complex packet processing graph.
+functions can link them together to create a complex packet processing graph.
 
 There are multiple different types of strategies to link the nodes.
 
 Method (a):
 ^^^^^^^^^^^
-Provide the ``next_nodes[]`` at the node registration time. See  ``struct rte_node_register::nb_edges``.
+Provide the ``next_nodes[]`` at the node registration time. See ``struct rte_node_register::nb_edges``.
 This is a use case to address the static node scheme where one knows upfront the
 ``next_nodes[]`` of the node.
 
@@ -385,8 +398,9 @@ Understanding the memory layout helps to debug the graph library and
 improve the performance if needed.
 
 Graph object consists of a header, circular buffer to store the pending
-stream when walking over the graph, and variable-length memory to store
-the ``rte_node`` objects.
+stream when walking over the graph, variable-length memory to store
+the ``rte_node`` objects, and variable-length memory to store the xstat
+reported by each ``rte_node``.
 
 The graph_nodes_mem_create() creates and populate this memory. The functions
 such as ``rte_graph_walk()`` and ``rte_node_enqueue_*`` use this memory
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 7bc2310bc4..20fcfedb7b 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -193,9 +193,3 @@ Deprecation Notices
   will be deprecated and subsequently removed in DPDK 24.11 release.
   Before this, the new port library API (functions rte_swx_port_*)
   will gradually transition from experimental to stable status.
-
-* graph: The graph library data structures will be modified
-  to support node specific errors.
-  The structures ``rte_node``, ``rte_node_register``
-  and ``rte_graph_cluster_node_stats`` will be extended
-  to include node error counters and error description.
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index dcee09b5d0..fee4b2305d 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -178,6 +178,10 @@ New Features
   This field is used to pass an extra configuration settings such as ability
   to lookup IPv4 addresses in network byte order.
 
+* **Added node specific xstats for rte_graph.**
+
+  * Added the ability for a node to advertise and update multiple xstat counters,
+    which can be retrieved using ``rte_graph_cluster_stats_get()``.
 
 Removed Items
 -------------
@@ -260,6 +264,10 @@ ABI Changes
 
 * eventdev: Added ``preschedule_type`` field to ``rte_event_dev_config`` structure.
 
+* graph: To accommodate node specific xstats counters, added ``xstat_cntrs``,
+  ``xstat_desc`` and ``xstat_count`` to ``rte_graph_cluster_node_stats``,
+  added the new structure ``rte_node_xstats`` to ``rte_node_register``, and
+  added ``xstat_off`` to ``rte_node``.
 
 Known Issues
 ------------
diff --git a/lib/graph/graph_populate.c b/lib/graph/graph_populate.c
index ed596a7711..1e6b08319e 100644
--- a/lib/graph/graph_populate.c
+++ b/lib/graph/graph_populate.c
@@ -39,6 +39,15 @@ graph_fp_mem_calc_size(struct graph *graph)
 		/* Pointer to next nodes(edges) */
 		sz += sizeof(struct rte_node *) * graph_node->node->nb_edges;
 	}
+	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+	graph->xstats_start = sz;
+	/* For 0..N node objects with xstats */
+	STAILQ_FOREACH(graph_node, &graph->node_list, next) {
+		if (graph_node->node->xstats == NULL)
+			continue;
+		sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+		sz += sizeof(uint64_t) * graph_node->node->xstats->nb_xstats;
+	}
 
 	graph->mem_sz = sz;
 	return sz;
@@ -64,6 +73,7 @@ graph_header_popluate(struct graph *_graph)
 static void
 graph_nodes_populate(struct graph *_graph)
 {
+	rte_graph_off_t xstat_off = _graph->xstats_start;
 	rte_graph_off_t off = _graph->nodes_start;
 	struct rte_graph *graph = _graph->graph;
 	struct graph_node *graph_node;
@@ -99,6 +109,12 @@ graph_nodes_populate(struct graph *_graph)
 						     ->adjacency_list[count]
 						     ->node->name[0];
 
+		if (graph_node->node->xstats != NULL) {
+			node->xstat_off = xstat_off - off;
+			xstat_off += sizeof(uint64_t) * graph_node->node->xstats->nb_xstats;
+			xstat_off = RTE_ALIGN(xstat_off, RTE_CACHE_LINE_SIZE);
+		}
+
 		off += sizeof(struct rte_node *) * nb_edges;
 		off = RTE_ALIGN(off, RTE_CACHE_LINE_SIZE);
 		node->next = off;
@@ -158,7 +174,7 @@ graph_node_nexts_populate(struct graph *_graph)
 }
 
 static int
-graph_src_nodes_populate(struct graph *_graph)
+graph_src_nodes_offset_populate(struct graph *_graph)
 {
 	struct rte_graph *graph = _graph->graph;
 	struct graph_node *graph_node;
@@ -193,7 +209,7 @@ graph_fp_mem_populate(struct graph *graph)
 		graph_pcap_init(graph);
 	graph_nodes_populate(graph);
 	rc = graph_node_nexts_populate(graph);
-	rc |= graph_src_nodes_populate(graph);
+	rc |= graph_src_nodes_offset_populate(graph);
 
 	return rc;
 }
diff --git a/lib/graph/graph_private.h b/lib/graph/graph_private.h
index d557d55f2d..da48d73587 100644
--- a/lib/graph/graph_private.h
+++ b/lib/graph/graph_private.h
@@ -61,6 +61,7 @@ struct node {
 	rte_node_t id;		      /**< Allocated identifier for the node. */
 	rte_node_t parent_id;	      /**< Parent node identifier. */
 	rte_edge_t nb_edges;	      /**< Number of edges from this node. */
+	struct rte_node_xstats *xstats;	      /**< Node specific xstats. */
 	char next_nodes[][RTE_NODE_NAMESIZE]; /**< Names of next nodes. */
 };
 
@@ -102,6 +103,8 @@ struct graph {
 	/**< Memzone to store graph data. */
 	rte_graph_off_t nodes_start;
 	/**< Node memory start offset in graph reel. */
+	rte_graph_off_t xstats_start;
+	/**< Node xstats memory start offset in graph reel. */
 	rte_node_t src_node_count;
 	/**< Number of source nodes in a graph. */
 	struct rte_graph *graph;
diff --git a/lib/graph/graph_stats.c b/lib/graph/graph_stats.c
index d71451a17b..a34b4a8200 100644
--- a/lib/graph/graph_stats.c
+++ b/lib/graph/graph_stats.c
@@ -121,6 +121,24 @@ print_node(FILE *f, const struct rte_graph_cluster_node_stats *stat, bool dispat
 	}
 }
 
+static inline void
+print_xstat(FILE *f, const struct rte_graph_cluster_node_stats *stat, bool dispatch)
+{
+	int i;
+
+	if (dispatch) {
+		for (i = 0; i < stat->xstat_cntrs; i++)
+			fprintf(f,
+				"|\t%-24s|%15s|%-15" PRIu64 "|%15s|%15s|%15s|%15s|%15s|%11.4s|\n",
+				stat->xstat_desc[i], "", stat->xstat_count[i], "", "", "", "", "",
+				"");
+	} else {
+		for (i = 0; i < stat->xstat_cntrs; i++)
+			fprintf(f, "|\t%-24s|%15s|%-15" PRIu64 "|%15s|%15.3s|%15.6s|%11.4s|\n",
+				stat->xstat_desc[i], "", stat->xstat_count[i], "", "", "", "");
+	}
+}
+
 static int
 graph_cluster_stats_cb(bool dispatch, bool is_first, bool is_last, void *cookie,
 		       const struct rte_graph_cluster_node_stats *stat)
@@ -129,8 +147,11 @@ graph_cluster_stats_cb(bool dispatch, bool is_first, bool is_last, void *cookie,
 
 	if (unlikely(is_first))
 		print_banner(f, dispatch);
-	if (stat->objs)
+	if (stat->objs) {
 		print_node(f, stat, dispatch);
+		if (stat->xstat_cntrs)
+			print_xstat(f, stat, dispatch);
+	}
 	if (unlikely(is_last)) {
 		if (dispatch)
 			boarder_model_dispatch();
@@ -203,6 +224,7 @@ stats_mem_populate(struct rte_graph_cluster_stats **stats_in,
 	struct cluster_node *cluster;
 	struct rte_node *node;
 	rte_node_t count;
+	uint8_t i;
 
 	cluster = stats->clusters;
 
@@ -240,6 +262,37 @@ stats_mem_populate(struct rte_graph_cluster_stats **stats_in,
 		SET_ERR_JMP(ENOENT, free, "Failed to find node %s in graph %s",
 			    graph_node->node->name, graph->name);
 	cluster->nodes[cluster->nb_nodes++] = node;
+	if (graph_node->node->xstats) {
+		cluster->stat.xstat_cntrs = graph_node->node->xstats->nb_xstats;
+		cluster->stat.xstat_count = rte_zmalloc_socket(
+			NULL, sizeof(uint64_t) * graph_node->node->xstats->nb_xstats,
+			RTE_CACHE_LINE_SIZE, stats->socket_id);
+		if (cluster->stat.xstat_count == NULL)
+			SET_ERR_JMP(ENOMEM, free, "Failed to allocate memory node %s graph %s",
+				    graph_node->node->name, graph->name);
+
+		cluster->stat.xstat_desc = rte_zmalloc_socket(
+			NULL,
+			RTE_NODE_XSTAT_DESC_SIZE * graph_node->node->xstats->nb_xstats,
+			RTE_CACHE_LINE_SIZE, stats->socket_id);
+		if (cluster->stat.xstat_desc == NULL) {
+			rte_free(cluster->stat.xstat_count);
+			SET_ERR_JMP(ENOMEM, free, "Failed to allocate memory node %s graph %s",
+				    graph_node->node->name, graph->name);
+		}
+
+		for (i = 0; i < cluster->stat.xstat_cntrs; i++) {
+			if (rte_strscpy(cluster->stat.xstat_desc[i],
+					graph_node->node->xstats->xstat_desc[i],
+					RTE_NODE_XSTAT_DESC_SIZE) < 0) {
+				rte_free(cluster->stat.xstat_count);
+				rte_free(cluster->stat.xstat_desc);
+				SET_ERR_JMP(E2BIG, free,
+					    "Error description overflow node %s graph %s",
+					    graph_node->node->name, graph->name);
+			}
+		}
+	}
 
 	stats->sz += stats->cluster_node_size;
 	stats->max_nodes++;
@@ -388,6 +441,18 @@ rte_graph_cluster_stats_create(const struct rte_graph_cluster_stats_param *prm)
 void
 rte_graph_cluster_stats_destroy(struct rte_graph_cluster_stats *stat)
 {
+	struct cluster_node *cluster;
+	rte_node_t count;
+
+	cluster = stat->clusters;
+	for (count = 0; count < stat->max_nodes; count++) {
+		if (cluster->stat.xstat_cntrs) {
+			rte_free(cluster->stat.xstat_count);
+			rte_free(cluster->stat.xstat_desc);
+		}
+
+		cluster = RTE_PTR_ADD(cluster, stat->cluster_node_size);
+	}
 	return rte_free(stat);
 }
 
@@ -399,7 +464,10 @@ cluster_node_arregate_stats(struct cluster_node *cluster, bool dispatch)
 	uint64_t sched_objs = 0, sched_fail = 0;
 	struct rte_node *node;
 	rte_node_t count;
+	uint64_t *xstat;
+	uint8_t i;
 
+	memset(stat->xstat_count, 0, sizeof(uint64_t) * stat->xstat_cntrs);
 	for (count = 0; count < cluster->nb_nodes; count++) {
 		node = cluster->nodes[count];
 
@@ -412,6 +480,12 @@ cluster_node_arregate_stats(struct cluster_node *cluster, bool dispatch)
 		objs += node->total_objs;
 		cycles += node->total_cycles;
 		realloc_count += node->realloc_count;
+
+		if (node->xstat_off == 0)
+			continue;
+		xstat = RTE_PTR_ADD(node, node->xstat_off);
+		for (i = 0; i < stat->xstat_cntrs; i++)
+			stat->xstat_count[i] += xstat[i];
 	}
 
 	stat->calls = calls;
@@ -464,6 +538,7 @@ rte_graph_cluster_stats_reset(struct rte_graph_cluster_stats *stat)
 {
 	struct cluster_node *cluster;
 	rte_node_t count;
+	uint8_t i;
 
 	cluster = stat->clusters;
 
@@ -479,6 +554,8 @@ rte_graph_cluster_stats_reset(struct rte_graph_cluster_stats *stat)
 		node->prev_objs = 0;
 		node->prev_cycles = 0;
 		node->realloc_count = 0;
+		for (i = 0; i < node->xstat_cntrs; i++)
+			node->xstat_count[i] = 0;
 		cluster = RTE_PTR_ADD(cluster, stat->cluster_node_size);
 	}
 }
diff --git a/lib/graph/node.c b/lib/graph/node.c
index 99a9622779..2e20d5811c 100644
--- a/lib/graph/node.c
+++ b/lib/graph/node.c
@@ -85,9 +85,24 @@ __rte_node_register(const struct rte_node_register *reg)
 		goto fail;
 	}
 
+	if (reg->xstats) {
+		sz = sizeof(*reg->xstats) + (reg->xstats->nb_xstats * RTE_NODE_XSTAT_DESC_SIZE);
+		node->xstats = calloc(1, sz);
+		if (node->xstats == NULL) {
+			rte_errno = ENOMEM;
+			goto free;
+		}
+
+		node->xstats->nb_xstats = reg->xstats->nb_xstats;
+		for (i = 0; i < reg->xstats->nb_xstats; i++)
+			if (rte_strscpy(node->xstats->xstat_desc[i], reg->xstats->xstat_desc[i],
+					RTE_NODE_XSTAT_DESC_SIZE) < 0)
+				goto free_xstat;
+	}
+
 	/* Initialize the node */
 	if (rte_strscpy(node->name, reg->name, RTE_NODE_NAMESIZE) < 0)
-		goto free;
+		goto free_xstat;
 	node->flags = reg->flags;
 	node->process = reg->process;
 	node->init = reg->init;
@@ -97,7 +112,7 @@ __rte_node_register(const struct rte_node_register *reg)
 	for (i = 0; i < reg->nb_edges; i++) {
 		if (rte_strscpy(node->next_nodes[i], reg->next_nodes[i],
 				RTE_NODE_NAMESIZE) < 0)
-			goto free;
+			goto free_xstat;
 	}
 
 	node->lcore_id = RTE_MAX_LCORE;
@@ -108,6 +123,8 @@ __rte_node_register(const struct rte_node_register *reg)
 	graph_spinlock_unlock();
 
 	return node->id;
+free_xstat:
+	free(node->xstats);
 free:
 	free(node);
 fail:
@@ -134,6 +151,20 @@ node_clone(struct node *node, const char *name)
 		goto fail;
 	}
 
+	if (node->xstats) {
+		reg->xstats = calloc(1, sizeof(*node->xstats) + (node->xstats->nb_xstats *
+								 RTE_NODE_XSTAT_DESC_SIZE));
+		if (reg->xstats == NULL) {
+			rte_errno = ENOMEM;
+			goto fail;
+		}
+
+		for (i = 0; i < node->xstats->nb_xstats; i++)
+			if (rte_strscpy(reg->xstats->xstat_desc[i], node->xstats->xstat_desc[i],
+					RTE_NODE_XSTAT_DESC_SIZE) < 0)
+				goto free_xstat;
+	}
+
 	/* Clone the source node */
 	reg->flags = node->flags;
 	reg->process = node->process;
@@ -150,6 +181,8 @@ node_clone(struct node *node, const char *name)
 		goto free;
 
 	rc = __rte_node_register(reg);
+free_xstat:
+	free(reg->xstats);
 free:
 	free(reg);
 fail:
diff --git a/lib/graph/rte_graph.h b/lib/graph/rte_graph.h
index ecfec2068a..a2017eacf4 100644
--- a/lib/graph/rte_graph.h
+++ b/lib/graph/rte_graph.h
@@ -29,6 +29,7 @@ extern "C" {
 
 #define RTE_GRAPH_NAMESIZE 64 /**< Max length of graph name. */
 #define RTE_NODE_NAMESIZE 64  /**< Max length of node name. */
+#define RTE_NODE_XSTAT_DESC_SIZE 64  /**< Max length of node xstat description. */
 #define RTE_GRAPH_PCAP_FILE_SZ 64 /**< Max length of pcap file name. */
 #define RTE_GRAPH_OFF_INVALID UINT32_MAX /**< Invalid graph offset. */
 #define RTE_NODE_ID_INVALID UINT32_MAX   /**< Invalid node id. */
@@ -222,6 +223,10 @@ struct __rte_cache_aligned rte_graph_cluster_node_stats {
 
 	uint64_t realloc_count; /**< Realloc count. */
 
+	uint8_t xstat_cntrs;			      /**< Number of Node xstat counters. */
+	char (*xstat_desc)[RTE_NODE_XSTAT_DESC_SIZE]; /**< Names of the Node xstat counters. */
+	uint64_t *xstat_count;			      /**< Total stat count per each xstat. */
+
 	rte_node_t id;	/**< Node identifier of stats. */
 	uint64_t hz;	/**< Cycles per seconds. */
 	char name[RTE_NODE_NAMESIZE];	/**< Name of the node. */
@@ -460,6 +465,15 @@ void rte_graph_cluster_stats_get(struct rte_graph_cluster_stats *stat,
  */
 void rte_graph_cluster_stats_reset(struct rte_graph_cluster_stats *stat);
 
+/**
+ * Structure defines the number of xstats a given node has and each xstat
+ * description.
+ */
+struct rte_node_xstats {
+	uint16_t nb_xstats;			     /**< Number of xstats. */
+	char xstat_desc[][RTE_NODE_XSTAT_DESC_SIZE]; /**< Names of xstats. */
+};
+
 /**
  * Structure defines the node registration parameters.
  *
@@ -472,6 +486,7 @@ struct rte_node_register {
 	rte_node_process_t process; /**< Node process function. */
 	rte_node_init_t init;       /**< Node init function. */
 	rte_node_fini_t fini;       /**< Node fini function. */
+	struct rte_node_xstats *xstats; /**< Node specific xstats. */
 	rte_node_t id;		    /**< Node Identifier. */
 	rte_node_t parent_id;       /**< Identifier of parent node. */
 	rte_edge_t nb_edges;        /**< Number of edges from this node. */
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index 8d8956fddd..a518af2b2a 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -112,6 +112,7 @@ struct __rte_cache_aligned rte_node {
 			uint64_t total_sched_fail; /**< Number of scheduled failure. */
 		} dispatch;
 	};
+	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
 	/* Fast path area  */
 	__extension__ struct __rte_cache_aligned {
 #define RTE_NODE_CTX_SZ 16
@@ -584,6 +585,28 @@ uint8_t rte_graph_worker_model_no_check_get(struct rte_graph *graph)
 	return graph->model;
 }
 
+/**
+ * Increment Node xstat count.
+ *
+ * Increment the count of an xstat for a given node.
+ *
+ * @param node
+ *   Pointer to the node.
+ * @param xstat_id
+ *   xstat ID.
+ * @param value
+ *   Value to increment.
+ */
+__rte_experimental
+static inline void
+rte_node_xstat_increment(struct rte_node *node, uint16_t xstat_id, uint64_t value)
+{
+	if (rte_graph_has_stats_feature()) {
+		uint64_t *xstat = (uint64_t *)RTE_PTR_ADD(node, node->xstat_off);
+		xstat[xstat_id] += value;
+	}
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/graph/version.map b/lib/graph/version.map
index 2c83425ddc..44fadc00fd 100644
--- a/lib/graph/version.map
+++ b/lib/graph/version.map
@@ -52,3 +52,10 @@ DPDK_25 {
 
 	local: *;
 };
+
+EXPERIMENTAL {
+	global:
+
+	# added in 24.11
+	rte_node_xstat_increment;
+};
-- 
2.25.1



* [RFC 00/10] eventdev: remove single-event enqueue and dequeue
@ 2024-10-15  8:49  3% Mattias Rönnblom
  2024-10-15  8:49  8% ` [RFC 10/10] eventdev: remove single event " Mattias Rönnblom
  2024-10-15 17:07  0% ` [RFC 00/10] eventdev: remove single-event " Stephen Hemminger
  0 siblings, 2 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-15  8:49 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: dev, Mattias Rönnblom, David Marchand, Mattias Rönnblom

Remove the single-event enqueue and dequeue functions from the
eventdev "ops" struct, to reduce complexity, leaving performance
unaffected.

This ABI change was announced in a DPDK deprecation notice,
originally scheduled for DPDK 23.11.
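
Application code is untouched: a single event still goes through the burst
API with nb_events == 1, e.g. (a sketch, not part of this series):

#include <rte_eventdev.h>

static void
forward_one(uint8_t dev_id, uint8_t port_id, uint64_t timeout_ticks)
{
	struct rte_event ev;

	if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, timeout_ticks) == 1) {
		ev.op = RTE_EVENT_OP_FORWARD;
		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
	}
}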

Mattias Rönnblom (10):
  event/dsw: remove single event enqueue and dequeue
  event/dlb2: remove single event enqueue and dequeue
  event/cnxk: remove single event enqueue and dequeue
  event/octeontx: remove single event enqueue and dequeue
  event/sw: remove single event enqueue and dequeue
  event/dpaa: remove single event enqueue and dequeue
  event/dpaa2: remove single event enqueue and dequeue
  event/opdl: remove single event enqueue and dequeue
  event/skeleton: remove single event enqueue and dequeue
  eventdev: remove single event enqueue and dequeue

 doc/guides/rel_notes/deprecation.rst       |  6 +--
 drivers/event/cnxk/cn10k_eventdev.c        |  2 -
 drivers/event/cnxk/cn10k_worker.c          | 49 ++++++++++------------
 drivers/event/cnxk/cn10k_worker.h          |  9 ----
 drivers/event/cnxk/cn9k_eventdev.c         |  1 -
 drivers/event/cnxk/cn9k_worker.c           | 26 ++++--------
 drivers/event/cnxk/cn9k_worker.h           | 17 --------
 drivers/event/dlb2/dlb2.c                  | 33 +--------------
 drivers/event/dpaa/dpaa_eventdev.c         | 27 +-----------
 drivers/event/dpaa2/dpaa2_eventdev.c       |  2 -
 drivers/event/dsw/dsw_evdev.c              |  2 -
 drivers/event/dsw/dsw_evdev.h              |  2 -
 drivers/event/dsw/dsw_event.c              | 12 ------
 drivers/event/octeontx/ssovf_evdev.h       |  1 -
 drivers/event/octeontx/ssovf_worker.c      | 40 ++----------------
 drivers/event/opdl/opdl_evdev.c            |  2 -
 drivers/event/skeleton/skeleton_eventdev.c |  4 --
 drivers/event/sw/sw_evdev.c                |  2 -
 drivers/event/sw/sw_evdev.h                |  2 -
 drivers/event/sw/sw_evdev_worker.c         | 12 ------
 lib/eventdev/eventdev_pmd.h                |  4 --
 lib/eventdev/eventdev_private.c            | 22 ----------
 lib/eventdev/rte_eventdev.h                | 21 ++--------
 lib/eventdev/rte_eventdev_core.h           |  4 --
 24 files changed, 43 insertions(+), 259 deletions(-)

-- 
2.43.0



* [RFC 10/10] eventdev: remove single event enqueue and dequeue
  2024-10-15  8:49  3% [RFC 00/10] eventdev: remove single-event enqueue and dequeue Mattias Rönnblom
@ 2024-10-15  8:49  8% ` Mattias Rönnblom
  2024-10-15 17:07  0% ` [RFC 00/10] eventdev: remove single-event " Stephen Hemminger
  1 sibling, 0 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-15  8:49 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: dev, Mattias Rönnblom, David Marchand, Mattias Rönnblom

Remove the single event enqueue and dequeue, since they did not
provide any noticeable performance benefits.

This is an ABI change, previously announced in a deprecation
notice. These functions were not called directly by the application,
so the API remains unaffected.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 doc/guides/rel_notes/deprecation.rst |  6 +-----
 lib/eventdev/eventdev_pmd.h          |  4 ----
 lib/eventdev/eventdev_private.c      | 22 ----------------------
 lib/eventdev/rte_eventdev.h          | 21 ++++-----------------
 lib/eventdev/rte_eventdev_core.h     |  4 ----
 5 files changed, 5 insertions(+), 52 deletions(-)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 7bc2310bc4..6a6fd54444 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -173,11 +173,7 @@ Deprecation Notices
 
 * eventdev: The single-event (non-burst) enqueue and dequeue operations,
   used by static inline burst enqueue and dequeue functions in ``rte_eventdev.h``,
-  will be removed in DPDK 23.11.
-  This simplification includes changing the layout and potentially also
-  the size of the public ``rte_event_fp_ops`` struct, breaking the ABI.
-  Since these functions are not called directly by the application,
-  the API remains unaffected.
+  are removed in DPDK 24.11.
 
 * pipeline: The pipeline library legacy API (functions rte_pipeline_*)
   will be deprecated and subsequently removed in DPDK 24.11 release.
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index af855e3467..36148f8d86 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -158,16 +158,12 @@ struct __rte_cache_aligned rte_eventdev {
 	uint8_t attached : 1;
 	/**< Flag indicating the device is attached */
 
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
 	event_enqueue_burst_t enqueue_burst;
 	/**< Pointer to PMD enqueue burst function. */
 	event_enqueue_burst_t enqueue_new_burst;
 	/**< Pointer to PMD enqueue burst function(op new variant) */
 	event_enqueue_burst_t enqueue_forward_burst;
 	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< Pointer to PMD dequeue burst function. */
 	event_maintain_t maintain;
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
index b628f4a69e..6df129fc2d 100644
--- a/lib/eventdev/eventdev_private.c
+++ b/lib/eventdev/eventdev_private.c
@@ -5,15 +5,6 @@
 #include "eventdev_pmd.h"
 #include "rte_eventdev.h"
 
-static uint16_t
-dummy_event_enqueue(__rte_unused void *port,
-		    __rte_unused const struct rte_event *ev)
-{
-	RTE_EDEV_LOG_ERR(
-		"event enqueue requested for unconfigured event device");
-	return 0;
-}
-
 static uint16_t
 dummy_event_enqueue_burst(__rte_unused void *port,
 			  __rte_unused const struct rte_event ev[],
@@ -24,15 +15,6 @@ dummy_event_enqueue_burst(__rte_unused void *port,
 	return 0;
 }
 
-static uint16_t
-dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
-		    __rte_unused uint64_t timeout_ticks)
-{
-	RTE_EDEV_LOG_ERR(
-		"event dequeue requested for unconfigured event device");
-	return 0;
-}
-
 static uint16_t
 dummy_event_dequeue_burst(__rte_unused void *port,
 			  __rte_unused struct rte_event ev[],
@@ -129,11 +111,9 @@ event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
 {
 	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
 	static const struct rte_event_fp_ops dummy = {
-		.enqueue = dummy_event_enqueue,
 		.enqueue_burst = dummy_event_enqueue_burst,
 		.enqueue_new_burst = dummy_event_enqueue_burst,
 		.enqueue_forward_burst = dummy_event_enqueue_burst,
-		.dequeue = dummy_event_dequeue,
 		.dequeue_burst = dummy_event_dequeue_burst,
 		.maintain = dummy_event_maintain,
 		.txa_enqueue = dummy_event_tx_adapter_enqueue,
@@ -153,11 +133,9 @@ void
 event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
 		     const struct rte_eventdev *dev)
 {
-	fp_op->enqueue = dev->enqueue;
 	fp_op->enqueue_burst = dev->enqueue_burst;
 	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
 	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
-	fp_op->dequeue = dev->dequeue;
 	fp_op->dequeue_burst = dev->dequeue_burst;
 	fp_op->maintain = dev->maintain;
 	fp_op->txa_enqueue = dev->txa_enqueue;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index b5c3c16dd0..fabd1490db 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -2596,14 +2596,8 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 	}
 #endif
 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (fp_ops->enqueue)(port, ev);
-	else
-		return fn(port, ev, nb_events);
+
+	return fn(port, ev, nb_events);
 }
 
 /**
@@ -2852,15 +2846,8 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 	}
 #endif
 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (fp_ops->dequeue)(port, ev, timeout_ticks);
-	else
-		return (fp_ops->dequeue_burst)(port, ev, nb_events,
-					       timeout_ticks);
+
+	return (fp_ops->dequeue_burst)(port, ev, nb_events, timeout_ticks);
 }
 
 #define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 2706d5e6c8..78b06d1f2e 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -60,16 +60,12 @@ typedef void (*event_preschedule_t)(void *port,
 struct __rte_cache_aligned rte_event_fp_ops {
 	void **data;
 	/**< points to array of internal port data pointers */
-	event_enqueue_t enqueue;
-	/**< PMD enqueue function. */
 	event_enqueue_burst_t enqueue_burst;
 	/**< PMD enqueue burst function. */
 	event_enqueue_burst_t enqueue_new_burst;
 	/**< PMD enqueue burst new function. */
 	event_enqueue_burst_t enqueue_forward_burst;
 	/**< PMD enqueue burst fwd function. */
-	event_dequeue_t dequeue;
-	/**< PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< PMD dequeue burst function. */
 	event_maintain_t maintain;
-- 
2.43.0


^ permalink raw reply	[relevance 8%]

* Re: [PATCH v10 0/2] power: introduce PM QoS interface
  2024-10-14 15:27  0%   ` Stephen Hemminger
@ 2024-10-15  9:30  0%     ` lihuisong (C)
  0 siblings, 0 replies; 169+ results
From: lihuisong (C) @ 2024-10-15  9:30 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: dev, mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, david.marchand, fengchengwen, liuyonglong


On 2024/10/14 23:27, Stephen Hemminger wrote:
> On Thu, 12 Sep 2024 10:38:10 +0800
> Huisong Li <lihuisong@huawei.com> wrote:
>
>> The deeper the idle state, the lower the power consumption, but the longer
>> the resume time. Some services are delay-sensitive and expect a low
>> resume time, such as the interrupt packet receiving mode.
>>
>> The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
>> interface is used by userspace to set and get the resume latency limit on
>> cpuX. Please see the description in the kernel documentation [1].
>> Each cpuidle governor in Linux selects which idle state to enter based on
>> this CPU resume latency in its idle task.
>>
>> The per-CPU PM QoS API can be used to control this CPU's idle state
>> selection and limit it to entering only the shallowest idle state, lowering
>> the wakeup delay, by setting a strict resume latency (zero value).
>>
>> [1] https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
>
> This is not a direct critique of this patch.
> The power library should have been designed to take a single configuration structure
> specifying CPU frequencies, wake up latency, and all the parameters from the kernel.
> And there would be a simple API with: rte_power_config_set() and rte_power_config_get().
Agreed. There are several different configuration objects in the power library.
It would be better if we could put the relevant configurations together.
This may become doable after Sivaprasad's patches optimizing the core
and uncore code in the power library.
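
For reference, the kernel interface discussed above is a plain sysfs
file; a minimal userspace sketch for pinning one CPU to the shallowest
idle state (illustrative only, not the proposed library API):

  #include <stdio.h>

  static int
  set_resume_latency(unsigned int cpu, int latency_us)
  {
          char path[128];
          FILE *f;

          snprintf(path, sizeof(path),
                   "/sys/devices/system/cpu/cpu%u/power/pm_qos_resume_latency_us",
                   cpu);
          f = fopen(path, "w");
          if (f == NULL)
                  return -1;
          /* 0 requests strict resume latency: only the shallowest state */
          fprintf(f, "%d", latency_us);
          fclose(f);
          return 0;
  }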
>
> .

^ permalink raw reply	[relevance 0%]

* [PATCH 1/3] bitops: fix build for GCC without experimental API
  @ 2024-10-15 12:10  3% ` David Marchand
  2024-10-15 12:47  0%   ` Morten Brørup
    1 sibling, 1 reply; 169+ results
From: David Marchand @ 2024-10-15 12:10 UTC (permalink / raw)
  To: dev
  Cc: thomas, bruce.richardson, ktraynor, Jack Bond-Preston,
	Tyler Retzlaff, Morten Brørup, Mattias Rönnblom

Building OVS against current DPDK fails with the following warnings:

In file included from .../ovs/dpdk-dir/include/rte_memory.h:18,
                 from .../ovs/dpdk-dir/include/rte_ring_core.h:29,
                 from .../ovs/dpdk-dir/include/rte_ring.h:37,
                 from .../ovs/dpdk-dir/include/rte_mempool.h:49,
                 from .../ovs/dpdk-dir/include/rte_mbuf.h:38,
                 from lib/dp-packet.h:25,
                 from lib/ofp-packet.c:20:
.../ovs/dpdk-dir/include/rte_bitops.h: In function ‘__rte_bit_assign32’:
.../ovs/dpdk-dir/include/rte_bitops.h:528:1: error:
	‘__rte_bit_set32’ is deprecated: Symbol is not yet part of
	stable ABI [-Werror=deprecated-declarations]
...

This comes from the fact that some (experimental) inline helpers
call other experimental API functions.
Hide those calls.
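
The problematic pattern boils down to one experimental static inline
calling another. A reduced sketch (the my_bit_* helpers are made up,
not the actual rte_bitops.h code):

  #include <stdbool.h>
  #include <stdint.h>
  #include <rte_compat.h> /* __rte_experimental */

  __rte_experimental
  static inline void
  my_bit_set(uint32_t *addr, unsigned int nr)
  {
          *addr |= UINT32_C(1) << nr;
  }

  /* Without -DALLOW_EXPERIMENTAL_API on the application side, GCC flags
   * the call below with -Wdeprecated-declarations even if the helper is
   * never used; wrapping the definitions in #ifdef ALLOW_EXPERIMENTAL_API
   * avoids instantiating the call. */
  __rte_experimental
  static inline void
  my_bit_assign(uint32_t *addr, unsigned int nr, bool value)
  {
          if (value)
                  my_bit_set(addr, nr);
          else
                  *addr &= ~(UINT32_C(1) << nr);
  }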

Fixes: 471de107ae23 ("bitops: add new bit manipulation API")

Reported-by: Kevin Traynor <ktraynor@redhat.com>
Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 lib/eal/include/rte_bitops.h | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lib/eal/include/rte_bitops.h b/lib/eal/include/rte_bitops.h
index e08e41199a..deb1fd43f2 100644
--- a/lib/eal/include/rte_bitops.h
+++ b/lib/eal/include/rte_bitops.h
@@ -525,8 +525,10 @@ __rte_bit_ ## variant ## flip ## size(qualifier uint ## size ## _t *addr, unsign
 	__RTE_GEN_BIT_OPS(,, size) \
 	__RTE_GEN_BIT_OPS(v_, volatile, size)
 
+#ifdef ALLOW_EXPERIMENTAL_API
 __RTE_GEN_BIT_OPS_SIZE(32)
 __RTE_GEN_BIT_OPS_SIZE(64)
+#endif
 
 #define __RTE_GEN_BIT_ATOMIC_TEST(variant, qualifier, size) \
 __rte_experimental \
@@ -651,8 +653,10 @@ __rte_bit_atomic_ ## variant ## test_and_assign ## size( \
 	__RTE_GEN_BIT_ATOMIC_OPS(,, size) \
 	__RTE_GEN_BIT_ATOMIC_OPS(v_, volatile, size)
 
+#ifdef ALLOW_EXPERIMENTAL_API
 __RTE_GEN_BIT_ATOMIC_OPS_SIZE(32)
 __RTE_GEN_BIT_ATOMIC_OPS_SIZE(64)
+#endif
 
 /*------------------------ 32-bit relaxed operations ------------------------*/
 
@@ -1481,6 +1485,7 @@ rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_nam
 	__RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name, \
 		arg2_type, arg2_name, arg3_type, arg3_name)
 
+#ifdef ALLOW_EXPERIMENTAL_API
 __RTE_BIT_OVERLOAD_2R(, test, const, bool, unsigned int, nr)
 __RTE_BIT_OVERLOAD_2(, set,, unsigned int, nr)
 __RTE_BIT_OVERLOAD_2(, clear,, unsigned int, nr)
@@ -1496,6 +1501,7 @@ __RTE_BIT_OVERLOAD_3R(atomic_, test_and_set,, bool, unsigned int, nr, int, memor
 __RTE_BIT_OVERLOAD_3R(atomic_, test_and_clear,, bool, unsigned int, nr, int, memory_order)
 __RTE_BIT_OVERLOAD_4R(atomic_, test_and_assign,, bool, unsigned int, nr, bool, value,
 	int, memory_order)
+#endif
 
 #endif
 
-- 
2.46.2


^ permalink raw reply	[relevance 3%]

* RE: [PATCH 1/3] bitops: fix build for GCC without experimental API
  2024-10-15 12:10  3% ` [PATCH 1/3] bitops: fix build for GCC without experimental API David Marchand
@ 2024-10-15 12:47  0%   ` Morten Brørup
  0 siblings, 0 replies; 169+ results
From: Morten Brørup @ 2024-10-15 12:47 UTC (permalink / raw)
  To: David Marchand, dev
  Cc: thomas, bruce.richardson, ktraynor, Jack Bond-Preston,
	Tyler Retzlaff, Mattias Rönnblom

> From: David Marchand [mailto:david.marchand@redhat.com]
> Sent: Tuesday, 15 October 2024 14.11
> 
> Building OVS against current DPDK fails with following warnings:
> 
> In file included from .../ovs/dpdk-dir/include/rte_memory.h:18,
>                  from .../ovs/dpdk-dir/include/rte_ring_core.h:29,
>                  from .../ovs/dpdk-dir/include/rte_ring.h:37,
>                  from .../ovs/dpdk-dir/include/rte_mempool.h:49,
>                  from .../ovs/dpdk-dir/include/rte_mbuf.h:38,
>                  from lib/dp-packet.h:25,
>                  from lib/ofp-packet.c:20:
> .../ovs/dpdk-dir/include/rte_bitops.h: In function
> ‘__rte_bit_assign32’:
> .../ovs/dpdk-dir/include/rte_bitops.h:528:1: error:
> 	‘__rte_bit_set32’ is deprecated: Symbol is not yet part of
> 	stable ABI [-Werror=deprecated-declarations]
> ...
> 
> This comes from the fact that some (experimental) inline helpers
> are calling other experimental API.
> Hide those calls.

Are you saying that the compiler warns if some experimental function calls another experimental function?

I understand the fix, so I am wondering about consistency:
Do we generally hide experimental functions if building without ALLOW_EXPERIMENTAL_API?
Or do we only do it for inline functions?


An alternative solution is copy-pasting the inlined implementations of the called experimental functions into the functions calling them.
I generally don't like copy-paste, so probably even worse.


Anyway, this is a viable fix, so
Acked-by: Morten Brørup <mb@smartsharesystems.com>


^ permalink raw reply	[relevance 0%]

* Re: [RFC 00/10] eventdev: remove single-event enqueue and dequeue
  2024-10-15  8:49  3% [RFC 00/10] eventdev: remove single-event enqueue and dequeue Mattias Rönnblom
  2024-10-15  8:49  8% ` [RFC 10/10] eventdev: remove single event " Mattias Rönnblom
@ 2024-10-15 17:07  0% ` Stephen Hemminger
  2024-10-15 18:38  0%   ` Mattias Rönnblom
  1 sibling, 1 reply; 169+ results
From: Stephen Hemminger @ 2024-10-15 17:07 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Jerin Jacob, dev, Mattias Rönnblom, David Marchand

On Tue, 15 Oct 2024 10:49:33 +0200
Mattias Rönnblom <mattias.ronnblom@ericsson.com> wrote:

> Remove the single-event enqueue and dequeue functions from the
> eventdev "ops" struct, to reduce complexity, leaving performance
> unaffected.
> 
> This ABI change has been announced as a DPDK deprecation notice,
> originally scheduled for DPDK 23.11.
> 
> Mattias Rönnblom (10):
>   event/dsw: remove single event enqueue and dequeue
>   event/dlb2: remove single event enqueue and dequeue
>   event/cnxk: remove single event enqueue and dequeue
>   event/octeontx: remove single event enqueue and dequeue
>   event/sw: remove single event enqueue and dequeue
>   event/dpaa: remove single event enqueue and dequeue
>   event/dpaa2: remove single event enqueue and dequeue
>   event/opdl: remove single event enqueue and dequeue
>   event/skeleton: remove single event enqueue and dequeue
>   eventdev: remove single event enqueue and dequeue
> 
>  doc/guides/rel_notes/deprecation.rst       |  6 +--
>  drivers/event/cnxk/cn10k_eventdev.c        |  2 -
>  drivers/event/cnxk/cn10k_worker.c          | 49 ++++++++++------------
>  drivers/event/cnxk/cn10k_worker.h          |  9 ----
>  drivers/event/cnxk/cn9k_eventdev.c         |  1 -
>  drivers/event/cnxk/cn9k_worker.c           | 26 ++++--------
>  drivers/event/cnxk/cn9k_worker.h           | 17 --------
>  drivers/event/dlb2/dlb2.c                  | 33 +--------------
>  drivers/event/dpaa/dpaa_eventdev.c         | 27 +-----------
>  drivers/event/dpaa2/dpaa2_eventdev.c       |  2 -
>  drivers/event/dsw/dsw_evdev.c              |  2 -
>  drivers/event/dsw/dsw_evdev.h              |  2 -
>  drivers/event/dsw/dsw_event.c              | 12 ------
>  drivers/event/octeontx/ssovf_evdev.h       |  1 -
>  drivers/event/octeontx/ssovf_worker.c      | 40 ++----------------
>  drivers/event/opdl/opdl_evdev.c            |  2 -
>  drivers/event/skeleton/skeleton_eventdev.c |  4 --
>  drivers/event/sw/sw_evdev.c                |  2 -
>  drivers/event/sw/sw_evdev.h                |  2 -
>  drivers/event/sw/sw_evdev_worker.c         | 12 ------
>  lib/eventdev/eventdev_pmd.h                |  4 --
>  lib/eventdev/eventdev_private.c            | 22 ----------
>  lib/eventdev/rte_eventdev.h                | 21 ++--------
>  lib/eventdev/rte_eventdev_core.h           |  4 --
>  24 files changed, 43 insertions(+), 259 deletions(-)

Looks good always like to see code removed.

You missed one place though.


*Build Failed #1:
OS: RHEL94-64
Target: x86_64-native-linuxapp-clang
FAILED: drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2.c.o 
clang -Idrivers/libtmp_rte_event_dlb2.a.p -Idrivers -I../drivers -Idrivers/event/dlb2 -I../drivers/event/dlb2 -Ilib/eventdev -I../lib/eventdev -I. -I.. -Iconfig -I../config -Ilib/eal/include -I../lib/eal/include -Ilib/eal/linux/include -I../lib/eal/linux/include -Ilib/eal/x86/include -I../lib/eal/x86/include -Ilib/eal/common -I../lib/eal/common -Ilib/eal -I../lib/eal -Ilib/kvargs -I../lib/kvargs -Ilib/log -I../lib/log -Ilib/metrics -I../lib/metrics -Ilib/telemetry -I../lib/telemetry -Ilib/ring -I../lib/ring -Ilib/ethdev -I../lib/ethdev -Ilib/net -I../lib/net -Ilib/mbuf -I../lib/mbuf -Ilib/mempool -I../lib/mempool -Ilib/meter -I../lib/meter -Ilib/hash -I../lib/hash -Ilib/rcu -I../lib/rcu -Ilib/timer -I../lib/timer -Ilib/cryptodev -I../lib/cryptodev -Ilib/dmadev -I../lib/dmadev -Ilib/pci -I../lib/pci -Idrivers/bus/pci -I../drivers/bus/pci -I../drivers/bus/pci/linux -fcolor-diagnostics -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch -Wextra -Werror -std=c11 -O3 -include rte_config.h -Wcast-qual -Wdeprecated -Wformat -Wformat-nonliteral -Wformat-security -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wold-style-definition -Wpointer-arith -Wsign-compare -Wstrict-prototypes -Wundef -Wwrite-strings -Wno-address-of-packed-member -Wno-missing-field-initializers -D_GNU_SOURCE -fPIC -march=native -mrtm -DALLOW_EXPERIMENTAL_API -DALLOW_INTERNAL_API -DCC_AVX512_SUPPORT -DRTE_LOG_DEFAULT_LOGTYPE=pmd.event.dlb2 -DRTE_ANNOTATE_LOCKS -Wthread-safety -MD -MQ drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2.c.o -MF drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2.c.o.d -o drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2.c.o -c ../drivers/event/dlb2/dlb2.c
../drivers/event/dlb2/dlb2.c:3303:1: error: unused function 'dlb2_event_enqueue_delayed' [-Werror,-Wunused-function]
 3303 | dlb2_event_enqueue_delayed(void *event_port,
      | ^~~~~~~~~~~~~~~~~~~~~~~~~~
1 error generated.
[2373/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2_iface.c.o
[2374/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2_xstats.c.o
[2375/3000] Compiling C object drivers/event/dlb2/libavx512_tmp.a.p/dlb2_avx512.c.o
[2376/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_rte_pmd_dlb2.c.o
[2377/3000] Compiling C object drivers/libtmp_rte_event_cnxk.a.p/event_cnxk_cn10k_eventdev.c.o
[2378/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_pf_dlb2_pf.c.o
[2379/3000] Compiling C object drivers/libtmp_rte_event_dpaa.a.p/event_dpaa_dpaa_eventdev.c.o
[2380/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2_selftest.c.o
[2381/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_pf_base_dlb2_resource.c.o
ninja: build stopped

^ permalink raw reply	[relevance 0%]

* [RFC v2 00/10] eventdev: remove single-event enqueue and dequeue
  @ 2024-10-15 18:25  3% ` Mattias Rönnblom
  2024-10-15 18:25  7%   ` [RFC v2 10/10] eventdev: remove single event " Mattias Rönnblom
  0 siblings, 1 reply; 169+ results
From: Mattias Rönnblom @ 2024-10-15 18:25 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: dev, Mattias Rönnblom, David Marchand, Stephen Hemminger,
	Anoob Joseph, Hemant Agrawal, Sachin Saxena, Abdullah Sevincer,
	Pavan Nikhilesh, Shijith Thotton, Harry van Haaren,
	Mattias Rönnblom

Remove the single-event enqueue and dequeue functions from the
eventdev "ops" struct, to reduce complexity, leaving performance
unaffected.

This ABI change has been announced as a DPDK deprecation notice,
originally scheduled for DPDK 23.11.

Mattias Rönnblom (10):
  event/dsw: remove single event enqueue and dequeue
  event/dlb2: remove single event enqueue and dequeue
  event/cnxk: remove single event enqueue and dequeue
  event/octeontx: remove single event enqueue and dequeue
  event/sw: remove single event enqueue and dequeue
  event/dpaa: remove single event enqueue and dequeue
  event/dpaa2: remove single event enqueue and dequeue
  event/opdl: remove single event enqueue and dequeue
  event/skeleton: remove single event enqueue and dequeue
  eventdev: remove single event enqueue and dequeue

 doc/guides/rel_notes/deprecation.rst       |  6 +--
 drivers/event/cnxk/cn10k_eventdev.c        |  2 -
 drivers/event/cnxk/cn10k_worker.c          | 49 ++++++++++------------
 drivers/event/cnxk/cn10k_worker.h          |  9 ----
 drivers/event/cnxk/cn9k_eventdev.c         |  1 -
 drivers/event/cnxk/cn9k_worker.c           | 26 ++++--------
 drivers/event/cnxk/cn9k_worker.h           | 17 --------
 drivers/event/dlb2/dlb2.c                  | 40 +-----------------
 drivers/event/dpaa/dpaa_eventdev.c         | 27 +-----------
 drivers/event/dpaa2/dpaa2_eventdev.c       | 15 -------
 drivers/event/dsw/dsw_evdev.c              |  2 -
 drivers/event/dsw/dsw_evdev.h              |  2 -
 drivers/event/dsw/dsw_event.c              | 12 ------
 drivers/event/octeontx/ssovf_evdev.h       |  1 -
 drivers/event/octeontx/ssovf_worker.c      | 40 ++----------------
 drivers/event/opdl/opdl_evdev.c            |  2 -
 drivers/event/skeleton/skeleton_eventdev.c | 29 -------------
 drivers/event/sw/sw_evdev.c                |  2 -
 drivers/event/sw/sw_evdev.h                |  2 -
 drivers/event/sw/sw_evdev_worker.c         | 12 ------
 lib/eventdev/eventdev_pmd.h                |  4 --
 lib/eventdev/eventdev_private.c            | 22 ----------
 lib/eventdev/rte_eventdev.h                | 21 ++--------
 lib/eventdev/rte_eventdev_core.h           |  4 --
 24 files changed, 43 insertions(+), 304 deletions(-)

-- 
2.43.0


^ permalink raw reply	[relevance 3%]

* [RFC v2 10/10] eventdev: remove single event enqueue and dequeue
  2024-10-15 18:25  3% ` [RFC v2 00/10] eventdev: remove single-event " Mattias Rönnblom
@ 2024-10-15 18:25  7%   ` Mattias Rönnblom
  2024-10-15 22:00  0%     ` Stephen Hemminger
  2024-10-16 14:14  3%     ` Jerin Jacob
  0 siblings, 2 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-15 18:25 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: dev, Mattias Rönnblom, David Marchand, Stephen Hemminger,
	Anoob Joseph, Hemant Agrawal, Sachin Saxena, Abdullah Sevincer,
	Pavan Nikhilesh, Shijith Thotton, Harry van Haaren,
	Mattias Rönnblom

Remove the single event enqueue and dequeue, since they did not
provide any noticeable performance benefits.

This is a change of the ABI, previously announced as a deprecation
notice. These functions were not directly called by the application,
so the API remains unaffected.
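
On the dequeue side the application-facing call is likewise untouched;
a single-event dequeue is simply a burst of one. A minimal sketch,
assuming a configured device/port (process_event() is a hypothetical
application handler):

  #include <rte_eventdev.h>

  struct rte_event ev;

  /* always dispatched through the dequeue_burst op now; 0 = no wait */
  if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 1)
          process_event(&ev); /* hypothetical handler */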

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 doc/guides/rel_notes/deprecation.rst |  6 +-----
 lib/eventdev/eventdev_pmd.h          |  4 ----
 lib/eventdev/eventdev_private.c      | 22 ----------------------
 lib/eventdev/rte_eventdev.h          | 21 ++++-----------------
 lib/eventdev/rte_eventdev_core.h     |  4 ----
 5 files changed, 5 insertions(+), 52 deletions(-)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 7bc2310bc4..6a6fd54444 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -173,11 +173,7 @@ Deprecation Notices
 
 * eventdev: The single-event (non-burst) enqueue and dequeue operations,
   used by static inline burst enqueue and dequeue functions in ``rte_eventdev.h``,
-  will be removed in DPDK 23.11.
-  This simplification includes changing the layout and potentially also
-  the size of the public ``rte_event_fp_ops`` struct, breaking the ABI.
-  Since these functions are not called directly by the application,
-  the API remains unaffected.
+  are removed in DPDK 24.11.
 
 * pipeline: The pipeline library legacy API (functions rte_pipeline_*)
   will be deprecated and subsequently removed in DPDK 24.11 release.
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index af855e3467..36148f8d86 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -158,16 +158,12 @@ struct __rte_cache_aligned rte_eventdev {
 	uint8_t attached : 1;
 	/**< Flag indicating the device is attached */
 
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
 	event_enqueue_burst_t enqueue_burst;
 	/**< Pointer to PMD enqueue burst function. */
 	event_enqueue_burst_t enqueue_new_burst;
 	/**< Pointer to PMD enqueue burst function(op new variant) */
 	event_enqueue_burst_t enqueue_forward_burst;
 	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< Pointer to PMD dequeue burst function. */
 	event_maintain_t maintain;
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
index b628f4a69e..6df129fc2d 100644
--- a/lib/eventdev/eventdev_private.c
+++ b/lib/eventdev/eventdev_private.c
@@ -5,15 +5,6 @@
 #include "eventdev_pmd.h"
 #include "rte_eventdev.h"
 
-static uint16_t
-dummy_event_enqueue(__rte_unused void *port,
-		    __rte_unused const struct rte_event *ev)
-{
-	RTE_EDEV_LOG_ERR(
-		"event enqueue requested for unconfigured event device");
-	return 0;
-}
-
 static uint16_t
 dummy_event_enqueue_burst(__rte_unused void *port,
 			  __rte_unused const struct rte_event ev[],
@@ -24,15 +15,6 @@ dummy_event_enqueue_burst(__rte_unused void *port,
 	return 0;
 }
 
-static uint16_t
-dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
-		    __rte_unused uint64_t timeout_ticks)
-{
-	RTE_EDEV_LOG_ERR(
-		"event dequeue requested for unconfigured event device");
-	return 0;
-}
-
 static uint16_t
 dummy_event_dequeue_burst(__rte_unused void *port,
 			  __rte_unused struct rte_event ev[],
@@ -129,11 +111,9 @@ event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
 {
 	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
 	static const struct rte_event_fp_ops dummy = {
-		.enqueue = dummy_event_enqueue,
 		.enqueue_burst = dummy_event_enqueue_burst,
 		.enqueue_new_burst = dummy_event_enqueue_burst,
 		.enqueue_forward_burst = dummy_event_enqueue_burst,
-		.dequeue = dummy_event_dequeue,
 		.dequeue_burst = dummy_event_dequeue_burst,
 		.maintain = dummy_event_maintain,
 		.txa_enqueue = dummy_event_tx_adapter_enqueue,
@@ -153,11 +133,9 @@ void
 event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
 		     const struct rte_eventdev *dev)
 {
-	fp_op->enqueue = dev->enqueue;
 	fp_op->enqueue_burst = dev->enqueue_burst;
 	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
 	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
-	fp_op->dequeue = dev->dequeue;
 	fp_op->dequeue_burst = dev->dequeue_burst;
 	fp_op->maintain = dev->maintain;
 	fp_op->txa_enqueue = dev->txa_enqueue;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index b5c3c16dd0..fabd1490db 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -2596,14 +2596,8 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 	}
 #endif
 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (fp_ops->enqueue)(port, ev);
-	else
-		return fn(port, ev, nb_events);
+
+	return fn(port, ev, nb_events);
 }
 
 /**
@@ -2852,15 +2846,8 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 	}
 #endif
 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (fp_ops->dequeue)(port, ev, timeout_ticks);
-	else
-		return (fp_ops->dequeue_burst)(port, ev, nb_events,
-					       timeout_ticks);
+
+	return (fp_ops->dequeue_burst)(port, ev, nb_events, timeout_ticks);
 }
 
 #define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 2706d5e6c8..78b06d1f2e 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -60,16 +60,12 @@ typedef void (*event_preschedule_t)(void *port,
 struct __rte_cache_aligned rte_event_fp_ops {
 	void **data;
 	/**< points to array of internal port data pointers */
-	event_enqueue_t enqueue;
-	/**< PMD enqueue function. */
 	event_enqueue_burst_t enqueue_burst;
 	/**< PMD enqueue burst function. */
 	event_enqueue_burst_t enqueue_new_burst;
 	/**< PMD enqueue burst new function. */
 	event_enqueue_burst_t enqueue_forward_burst;
 	/**< PMD enqueue burst fwd function. */
-	event_dequeue_t dequeue;
-	/**< PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< PMD dequeue burst function. */
 	event_maintain_t maintain;
-- 
2.43.0


^ permalink raw reply	[relevance 7%]

* Re: [RFC 00/10] eventdev: remove single-event enqueue and dequeue
  2024-10-15 17:07  0% ` [RFC 00/10] eventdev: remove single-event " Stephen Hemminger
@ 2024-10-15 18:38  0%   ` Mattias Rönnblom
  0 siblings, 0 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-15 18:38 UTC (permalink / raw)
  To: Stephen Hemminger, Mattias Rönnblom; +Cc: Jerin Jacob, dev, David Marchand

On 2024-10-15 19:07, Stephen Hemminger wrote:
> On Tue, 15 Oct 2024 10:49:33 +0200
> Mattias Rönnblom <mattias.ronnblom@ericsson.com> wrote:
> 
>> Remove the single-event enqueue and dequeue functions from the
>> eventdev "ops" struct, to reduce complexity, leaving performance
>> unaffected.
>>
>> This ABI change has been announced as a DPDK deprecation notice,
>> originally scheduled for DPDK 23.11.
>>
>> Mattias Rönnblom (10):
>>    event/dsw: remove single event enqueue and dequeue
>>    event/dlb2: remove single event enqueue and dequeue
>>    event/cnxk: remove single event enqueue and dequeue
>>    event/octeontx: remove single event enqueue and dequeue
>>    event/sw: remove single event enqueue and dequeue
>>    event/dpaa: remove single event enqueue and dequeue
>>    event/dpaa2: remove single event enqueue and dequeue
>>    event/opdl: remove single event enqueue and dequeue
>>    event/skeleton: remove single event enqueue and dequeue
>>    eventdev: remove single event enqueue and dequeue
>>
>>   doc/guides/rel_notes/deprecation.rst       |  6 +--
>>   drivers/event/cnxk/cn10k_eventdev.c        |  2 -
>>   drivers/event/cnxk/cn10k_worker.c          | 49 ++++++++++------------
>>   drivers/event/cnxk/cn10k_worker.h          |  9 ----
>>   drivers/event/cnxk/cn9k_eventdev.c         |  1 -
>>   drivers/event/cnxk/cn9k_worker.c           | 26 ++++--------
>>   drivers/event/cnxk/cn9k_worker.h           | 17 --------
>>   drivers/event/dlb2/dlb2.c                  | 33 +--------------
>>   drivers/event/dpaa/dpaa_eventdev.c         | 27 +-----------
>>   drivers/event/dpaa2/dpaa2_eventdev.c       |  2 -
>>   drivers/event/dsw/dsw_evdev.c              |  2 -
>>   drivers/event/dsw/dsw_evdev.h              |  2 -
>>   drivers/event/dsw/dsw_event.c              | 12 ------
>>   drivers/event/octeontx/ssovf_evdev.h       |  1 -
>>   drivers/event/octeontx/ssovf_worker.c      | 40 ++----------------
>>   drivers/event/opdl/opdl_evdev.c            |  2 -
>>   drivers/event/skeleton/skeleton_eventdev.c |  4 --
>>   drivers/event/sw/sw_evdev.c                |  2 -
>>   drivers/event/sw/sw_evdev.h                |  2 -
>>   drivers/event/sw/sw_evdev_worker.c         | 12 ------
>>   lib/eventdev/eventdev_pmd.h                |  4 --
>>   lib/eventdev/eventdev_private.c            | 22 ----------
>>   lib/eventdev/rte_eventdev.h                | 21 ++--------
>>   lib/eventdev/rte_eventdev_core.h           |  4 --
>>   24 files changed, 43 insertions(+), 259 deletions(-)
> 
> Looks good always like to see code removed.
> 
> You missed one place though.
> 
> 

I thought developer mode meant -Werror, for some reason.

Fixed this and two other similar issues in RFC v2.

Thanks.

> *Build Failed #1:
> OS: RHEL94-64
> Target: x86_64-native-linuxapp-clang
> FAILED: drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2.c.o
> clang -Idrivers/libtmp_rte_event_dlb2.a.p -Idrivers -I../drivers -Idrivers/event/dlb2 -I../drivers/event/dlb2 -Ilib/eventdev -I../lib/eventdev -I. -I.. -Iconfig -I../config -Ilib/eal/include -I../lib/eal/include -Ilib/eal/linux/include -I../lib/eal/linux/include -Ilib/eal/x86/include -I../lib/eal/x86/include -Ilib/eal/common -I../lib/eal/common -Ilib/eal -I../lib/eal -Ilib/kvargs -I../lib/kvargs -Ilib/log -I../lib/log -Ilib/metrics -I../lib/metrics -Ilib/telemetry -I../lib/telemetry -Ilib/ring -I../lib/ring -Ilib/ethdev -I../lib/ethdev -Ilib/net -I../lib/net -Ilib/mbuf -I../lib/mbuf -Ilib/mempool -I../lib/mempool -Ilib/meter -I../lib/meter -Ilib/hash -I../lib/hash -Ilib/rcu -I../lib/rcu -Ilib/timer -I../lib/timer -Ilib/cryptodev -I../lib/cryptodev -Ilib/dmadev -I../lib/dmadev -Ilib/pci -I../lib/pci -Idrivers/bus/pci -I../drivers/bus/pci -I../drivers/bus/pci/linux -fcolor-diagnostics -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch -Wextra -Werror -std=c11 -O3 -include rte_config.h -Wcast-qual -Wdeprecated -Wformat -Wformat-nonliteral -Wformat-security -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wold-style-definition -Wpointer-arith -Wsign-compare -Wstrict-prototypes -Wundef -Wwrite-strings -Wno-address-of-packed-member -Wno-missing-field-initializers -D_GNU_SOURCE -fPIC -march=native -mrtm -DALLOW_EXPERIMENTAL_API -DALLOW_INTERNAL_API -DCC_AVX512_SUPPORT -DRTE_LOG_DEFAULT_LOGTYPE=pmd.event.dlb2 -DRTE_ANNOTATE_LOCKS -Wthread-safety -MD -MQ drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2.c.o -MF drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2.c.o.d -o drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2.c.o -c ../drivers/event/dlb2/dlb2.c
> ../drivers/event/dlb2/dlb2.c:3303:1: error: unused function 'dlb2_event_enqueue_delayed' [-Werror,-Wunused-function]
>   3303 | dlb2_event_enqueue_delayed(void *event_port,
>        | ^~~~~~~~~~~~~~~~~~~~~~~~~~
> 1 error generated.
> [2373/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2_iface.c.o
> [2374/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2_xstats.c.o
> [2375/3000] Compiling C object drivers/event/dlb2/libavx512_tmp.a.p/dlb2_avx512.c.o
> [2376/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_rte_pmd_dlb2.c.o
> [2377/3000] Compiling C object drivers/libtmp_rte_event_cnxk.a.p/event_cnxk_cn10k_eventdev.c.o
> [2378/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_pf_dlb2_pf.c.o
> [2379/3000] Compiling C object drivers/libtmp_rte_event_dpaa.a.p/event_dpaa_dpaa_eventdev.c.o
> [2380/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_dlb2_selftest.c.o
> [2381/3000] Compiling C object drivers/libtmp_rte_event_dlb2.a.p/event_dlb2_pf_base_dlb2_resource.c.o
> ninja: build stopped


^ permalink raw reply	[relevance 0%]

* Re: [RFC v2 10/10] eventdev: remove single event enqueue and dequeue
  2024-10-15 18:25  7%   ` [RFC v2 10/10] eventdev: remove single event " Mattias Rönnblom
@ 2024-10-15 22:00  0%     ` Stephen Hemminger
  2024-10-16  4:36  0%       ` Mattias Rönnblom
  2024-10-16 14:14  3%     ` Jerin Jacob
  1 sibling, 1 reply; 169+ results
From: Stephen Hemminger @ 2024-10-15 22:00 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Jerin Jacob, dev, Mattias Rönnblom, David Marchand,
	Anoob Joseph, Hemant Agrawal, Sachin Saxena, Abdullah Sevincer,
	Pavan Nikhilesh, Shijith Thotton, Harry van Haaren

On Tue, 15 Oct 2024 20:25:35 +0200
Mattias Rönnblom <mattias.ronnblom@ericsson.com> wrote:

> Remove the single event enqueue and dequeue, since they did not
> provide any noticeable performance benefits.
> 
> This is a change of the ABI, previously announced as a deprecation
> notice. These functions were not directly called by the application,
> so the API remains unaffected.
> 
> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

Still have a build failure with one driver.


-------------------------------BEGIN LOGS----------------------------
####################################################################################
#### [Begin job log] "ubuntu-22.04-gcc-shared-aarch64" at step Build and test
####################################################################################
      |         ^~~~~~
../drivers/event/cnxk/cn9k_eventdev.c: In function ‘cn9k_sso_fp_fns_set’:
../drivers/event/cnxk/cn9k_eventdev.c:576:20: error: ‘struct rte_eventdev’ has no member named ‘enqueue’; did you mean ‘ca_enqueue’?
  576 |         event_dev->enqueue = cn9k_sso_hws_enq;
      |                    ^~~~~~~
      |                    ca_enqueue
../drivers/event/cnxk/cn9k_eventdev.c:576:30: error: ‘cn9k_sso_hws_enq’ undeclared (first use in this function); did you mean ‘cn9k_sso_hws_link’?
  576 |         event_dev->enqueue = cn9k_sso_hws_enq;
      |                              ^~~~~~~~~~~~~~~~
      |                              cn9k_sso_hws_link
../drivers/event/cnxk/cn9k_eventdev.c:584:28: error: ‘struct rte_eventdev’ has no member named ‘enqueue’; did you mean ‘ca_enqueue’?
  584 |                 event_dev->enqueue = cn9k_sso_hws_dual_enq;
      |                            ^~~~~~~
      |                            ca_enqueue
../drivers/event/cnxk/cn9k_eventdev.c:584:38: error: ‘cn9k_sso_hws_dual_enq’ undeclared (first use in this function); did you mean ‘cn9k_sso_hws_dual_ca_enq’?
  584 |                 event_dev->enqueue = cn9k_sso_hws_dual_enq;
      |                                      ^~~~~~~~~~~~~~~~~~~~~
      |                                      cn9k_sso_hws_dual_ca_enq
[2451/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_64_79_seg_burst.c.o'.
[2452/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_80_95_seg_burst.c.o'.
[2453/4290] Generating rte_crypto_octeontx.sym_chk with a meson_exe.py custom command.
[2454/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_96_111_seg_burst.c.o'.
[2455/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_112_127_seg_burst.c.o'.
ninja: build stopped: subcommand failed.
##[error]Process completed with exit code 1.
####################################################################################
#### [End job log] "ubuntu-22.04-gcc-shared-aarch64" at step Build and test
####################################################################################
--------------------------------END LOGS-----------------------------

^ permalink raw reply	[relevance 0%]

* Re: [RFC v2 10/10] eventdev: remove single event enqueue and dequeue
  2024-10-15 22:00  0%     ` Stephen Hemminger
@ 2024-10-16  4:36  0%       ` Mattias Rönnblom
  2024-10-16  6:20  0%         ` Mattias Rönnblom
  0 siblings, 1 reply; 169+ results
From: Mattias Rönnblom @ 2024-10-16  4:36 UTC (permalink / raw)
  To: Stephen Hemminger, Mattias Rönnblom
  Cc: Jerin Jacob, dev, David Marchand, Anoob Joseph, Hemant Agrawal,
	Sachin Saxena, Abdullah Sevincer, Pavan Nikhilesh,
	Shijith Thotton, Harry van Haaren

On 2024-10-16 00:00, Stephen Hemminger wrote:
> On Tue, 15 Oct 2024 20:25:35 +0200
> Mattias Rönnblom <mattias.ronnblom@ericsson.com> wrote:
> 
>> Remove the single event enqueue and dequeue, since they did not
>> provide any noticeable performance benefits.
>>
>> This is a change of the ABI, previously announced as a deprecation
>> notice. These functions were not directly called by the application,
>> so the API remains unaffected.
>>
>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> 
> Still have a build failure with one driver.
> 

The wonders of #ifdef <arch>.

> 
> -------------------------------BEGIN LOGS----------------------------
> ####################################################################################
> #### [Begin job log] "ubuntu-22.04-gcc-shared-aarch64" at step Build and test
> ####################################################################################
>        |         ^~~~~~
> ../drivers/event/cnxk/cn9k_eventdev.c: In function ‘cn9k_sso_fp_fns_set’:
> ../drivers/event/cnxk/cn9k_eventdev.c:576:20: error: ‘struct rte_eventdev’ has no member named ‘enqueue’; did you mean ‘ca_enqueue’?
>    576 |         event_dev->enqueue = cn9k_sso_hws_enq;
>        |                    ^~~~~~~
>        |                    ca_enqueue
> ../drivers/event/cnxk/cn9k_eventdev.c:576:30: error: ‘cn9k_sso_hws_enq’ undeclared (first use in this function); did you mean ‘cn9k_sso_hws_link’?
>    576 |         event_dev->enqueue = cn9k_sso_hws_enq;
>        |                              ^~~~~~~~~~~~~~~~
>        |                              cn9k_sso_hws_link
> ../drivers/event/cnxk/cn9k_eventdev.c:584:28: error: ‘struct rte_eventdev’ has no member named ‘enqueue’; did you mean ‘ca_enqueue’?
>    584 |                 event_dev->enqueue = cn9k_sso_hws_dual_enq;
>        |                            ^~~~~~~
>        |                            ca_enqueue
> ../drivers/event/cnxk/cn9k_eventdev.c:584:38: error: ‘cn9k_sso_hws_dual_enq’ undeclared (first use in this function); did you mean ‘cn9k_sso_hws_dual_ca_enq’?
>    584 |                 event_dev->enqueue = cn9k_sso_hws_dual_enq;
>        |                                      ^~~~~~~~~~~~~~~~~~~~~
>        |                                      cn9k_sso_hws_dual_ca_enq
> [2451/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_64_79_seg_burst.c.o'.
> [2452/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_80_95_seg_burst.c.o'.
> [2453/4290] Generating rte_crypto_octeontx.sym_chk with a meson_exe.py custom command.
> [2454/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_96_111_seg_burst.c.o'.
> [2455/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_112_127_seg_burst.c.o'.
> ninja: build stopped: subcommand failed.
> ##[error]Process completed with exit code 1.
> ####################################################################################
> #### [End job log] "ubuntu-22.04-gcc-shared-aarch64" at step Build and test
> ####################################################################################
> --------------------------------END LOGS-----------------------------


^ permalink raw reply	[relevance 0%]

* Re: [RFC v2 10/10] eventdev: remove single event enqueue and dequeue
  2024-10-16  4:36  0%       ` Mattias Rönnblom
@ 2024-10-16  6:20  0%         ` Mattias Rönnblom
  0 siblings, 0 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-16  6:20 UTC (permalink / raw)
  To: Stephen Hemminger, Mattias Rönnblom
  Cc: Jerin Jacob, dev, David Marchand, Anoob Joseph, Hemant Agrawal,
	Sachin Saxena, Abdullah Sevincer, Pavan Nikhilesh,
	Shijith Thotton, Harry van Haaren, Ashwin Sekhar T K

On 2024-10-16 06:36, Mattias Rönnblom wrote:
> On 2024-10-16 00:00, Stephen Hemminger wrote:
>> On Tue, 15 Oct 2024 20:25:35 +0200
>> Mattias Rönnblom <mattias.ronnblom@ericsson.com> wrote:
>>
>>> Remove the single event enqueue and dequeue, since they did not
>>> provide any noticeable performance benefits.
>>>
>>> This is a change of the ABI, previously announced as a deprecation
>>> notice. These functions were not directly called by the application,
>>> so the API remains unaffected.
>>>
>>> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>>
>> Still have a build failure with one driver.
>>
> 
> The wonders of #ifdef <arch>.
> 

I had a closer look at this, and given the elaborate macro-based
machinery and the ARM-only conditional compilation, it's better if a
driver maintainer does the required changes.

(It's probably better to start from scratch on the cnxk patch, rather 
than looking at anything I did.)

Ashwin Sekhar or Pavan Nikhilesh, is this something you can do?

>>
>> -------------------------------BEGIN LOGS----------------------------
>> ####################################################################################
>> #### [Begin job log] "ubuntu-22.04-gcc-shared-aarch64" at step Build 
>> and test
>> ####################################################################################
>>        |         ^~~~~~
>> ../drivers/event/cnxk/cn9k_eventdev.c: In function ‘cn9k_sso_fp_fns_set’:
>> ../drivers/event/cnxk/cn9k_eventdev.c:576:20: error: ‘struct 
>> rte_eventdev’ has no member named ‘enqueue’; did you mean ‘ca_enqueue’?
>>    576 |         event_dev->enqueue = cn9k_sso_hws_enq;
>>        |                    ^~~~~~~
>>        |                    ca_enqueue
>> ../drivers/event/cnxk/cn9k_eventdev.c:576:30: error: 
>> ‘cn9k_sso_hws_enq’ undeclared (first use in this function); did you 
>> mean ‘cn9k_sso_hws_link’?
>>    576 |         event_dev->enqueue = cn9k_sso_hws_enq;
>>        |                              ^~~~~~~~~~~~~~~~
>>        |                              cn9k_sso_hws_link
>> ../drivers/event/cnxk/cn9k_eventdev.c:584:28: error: ‘struct 
>> rte_eventdev’ has no member named ‘enqueue’; did you mean ‘ca_enqueue’?
>>    584 |                 event_dev->enqueue = cn9k_sso_hws_dual_enq;
>>        |                            ^~~~~~~
>>        |                            ca_enqueue
>> ../drivers/event/cnxk/cn9k_eventdev.c:584:38: error: 
>> ‘cn9k_sso_hws_dual_enq’ undeclared (first use in this function); did 
>> you mean ‘cn9k_sso_hws_dual_ca_enq’?
>>    584 |                 event_dev->enqueue = cn9k_sso_hws_dual_enq;
>>        |                                      ^~~~~~~~~~~~~~~~~~~~~
>>        |                                      cn9k_sso_hws_dual_ca_enq
>> [2451/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_64_79_seg_burst.c.o'.
>> [2452/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_80_95_seg_burst.c.o'.
>> [2453/4290] Generating rte_crypto_octeontx.sym_chk with a meson_exe.py custom command.
>> [2454/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_96_111_seg_burst.c.o'.
>> [2455/4290] Compiling C object 'drivers/a715181@@tmp_rte_event_cnxk@sta/event_cnxk_deq_cn9k_deq_112_127_seg_burst.c.o'.
>> ninja: build stopped: subcommand failed.
>> ##[error]Process completed with exit code 1.
>> ####################################################################################
>> #### [End job log] "ubuntu-22.04-gcc-shared-aarch64" at step Build and 
>> test
>> ####################################################################################
>> --------------------------------END LOGS-----------------------------
> 


^ permalink raw reply	[relevance 0%]

* Invitation: Adding support for PCIe steering tags in DPDK
@ 2024-10-15 16:54  2% Data Plane Development Kit - Meetings
  0 siblings, 0 replies; 169+ results
From: Data Plane Development Kit - Meetings @ 2024-10-15 16:54 UTC (permalink / raw)
  To: dev

[-- Attachment #1: Type: text/html, Size: 4138 bytes --]

[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: invite.ics --]
[-- Type: text/calendar; method=REQUEST, Size: 5499 bytes --]

BEGIN:VCALENDAR
METHOD:REQUEST
PRODID:-//Linux Foundation//Meeting Management
VERSION:2.0
BEGIN:VEVENT
ATTENDEE;VALUE=TEXT:dev@dpdk.org
CREATED;TZID=America/Chicago:20241015T115439
DESCRIPTION:\nYou have been invited to a meeting for Data Plane Development Kit (DPDK)\n\nWe discussed adding the PCIe steering tag support to DPDK. This feature allows for stashing the descriptors and packet data closer to the CPUs\, possibly allowing for lower latency and higher throughput. This feature requires contributions from CPU vendors and NIC vendors. The goal of the meeting is to present the next version of the API and seek support for implementation from other participants in the community. Agenda:\n- Brief introduction to the feature\n- Introduce the APIs from RFC v2 (this will be submitted to the community before the call)\n- Dependencies on kernel support - API for reading steering tags\n- Addressing ABI in advance as patches will not be ready by 24.11\n\nWays to join meeting:\n\n1. Join from PC\, Mac\, iPad\, or Android\n\nhttps://zoom-lfx.platform.linuxfoundation.org/meeting/94917063595?password=77f36625-ad41-4b9c-b067-d33e68c3a29e\n\n2. Join via audio\n\nOne tap mobile:\nUS (iOS): +12532158782\,\,94917063595#\,\,\,\,*270522# or +13462487799\,\,94917063595#\,\,\,\,*270522#\nUS (Android): +12532158782\;94917063595#\;270522# or +13462487799\;94917063595#\;270522#\n\nOr dial:\nUS: +1 253 215 8782 or +1 346 248 7799 or +1 669 900 6833 or +1 301 715 8592 or +1 312 626 6799 or +1 646 374 8656 or 877 369 0926 (Toll Free) or 855 880 1246 (Toll Free)\nCanada: +1 647 374 4685 or +1 647 558 0588 or +1 778 907 2071 or +1 204 272 7920 or +1 438 809 7799 or +1 587 328 1099 or 855 703 8985 (Toll Free)\n\nMeeting ID: 94917063595\n\nMeeting Passcode: 270522\n\n\nInternational numbers: https://zoom.us/u/alwnPIaVT\n
DTEND;TZID=America/Chicago:20241018T100000
DTSTAMP;TZID=America/Chicago:20241015T115439
DTSTART;TZID=America/Chicago:20241018T090000
LAST-MODIFIED;TZID=America/Chicago:20241015T115439
LOCATION:https://zoom-lfx.platform.linuxfoundation.org/meeting/94917063595?password=77f36625-ad41-4b9c-b067-d33e68c3a29e
ORGANIZER;CN=Data Plane Development Kit (DPDK):MAILTO:meetings@lfx.dev
SUMMARY:Adding support for PCIe steering tags in DPDK
TZID:America/Chicago
TZNAME:America/Chicago
UID:94917063595
URL;VALUE=TEXT:https://zoom-lfx.platform.linuxfoundation.org/meeting/94917063595?password=77f36625-ad41-4b9c-b067-d33e68c3a29e
X-MEETING-ID:94917063595
X-OCCURRENCE-ID:
X-REGISTRANT-ID:08cdd9b7-dfa9-42ea-a636-f8eaedece700
END:VEVENT
END:VCALENDAR

^ permalink raw reply	[relevance 2%]

* Re: [EXTERNAL] Re: [RFC PATCH 0/3] add feature arc in rte_graph
  @ 2024-10-16  9:24  3%     ` David Marchand
  2024-10-16  9:38  0%       ` Robin Jarry
  0 siblings, 1 reply; 169+ results
From: David Marchand @ 2024-10-16  9:24 UTC (permalink / raw)
  To: Nitin Saxena
  Cc: Jerin Jacob, Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram,
	Zhirun Yan, dev, Nitin Saxena, Robin Jarry, Christophe Fontaine

On Mon, Oct 14, 2024 at 1:12 PM Nitin Saxena <nsaxena@marvell.com> wrote:
> I had pushed the non-RFC patch series before the -rc1 date (11th Oct).
> We have an ABI change in this patch series https://patches.dpdk.org/project/dpdk/patch/20241010133111.2764712-3-nsaxena@marvell.com/
> Could you help merge this patch series in rc2? Otherwise it has to wait for the next LTS.

I just read through the series, and I am not confident about this
addition. It requires a lot of changes in the node code to support it,
whereas this should be something handled in, or facilitated by, the
graph library itself.
I have not heard much from Robin or Christophe, who have been writing
more node code than me.
I would prefer to have their opinion before going forward.


On the ABI topic: as far as I can see, the only issue would be
extending struct rte_node_register, but this can be solved with
function versioning. That change would have to be announced.

Am I missing something else?
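
For readers less familiar with that mechanism, here is a minimal
sketch of what such versioning looks like (hypothetical names, not an
actual patch; built with -DRTE_USE_FUNCTION_VERSIONING and matching
version nodes in the library's .map file):

	#include <rte_function_versioning.h>

	struct thing { int a; };            /* old, frozen layout */
	struct thing_v2 { int a; int b; };  /* extended layout */

	/* New implementation; the default for newly linked binaries. */
	int
	thing_register_v25(const struct thing_v2 *t)
	{
		return t->a + t->b;
	}
	BIND_DEFAULT_SYMBOL(thing_register, _v25, 25);

	/* Compatibility shim kept for binaries linked against DPDK_24. */
	int
	thing_register_v24(const struct thing *t)
	{
		struct thing_v2 ext = { .a = t->a, .b = 0 };

		return thing_register_v25(&ext);
	}
	VERSION_SYMBOL(thing_register, _v24, 24);

The same shape would let struct rte_node_register grow a new callback
while binaries built against the old layout keep working.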


-- 
David Marchand


^ permalink raw reply	[relevance 3%]

* Re: [EXTERNAL] Re: [RFC PATCH 0/3] add feature arc in rte_graph
  2024-10-16  9:24  3%     ` David Marchand
@ 2024-10-16  9:38  0%       ` Robin Jarry
  2024-10-16 13:50  0%         ` Nitin Saxena
  0 siblings, 1 reply; 169+ results
From: Robin Jarry @ 2024-10-16  9:38 UTC (permalink / raw)
  To: David Marchand, Nitin Saxena
  Cc: Jerin Jacob, Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram,
	Zhirun Yan, dev, Nitin Saxena, Christophe Fontaine

Hi folks,

David Marchand, Oct 16, 2024 at 11:24:
> On Mon, Oct 14, 2024 at 1:12 PM Nitin Saxena <nsaxena@marvell.com> wrote:
>> I had pushed the non-RFC patch series before the -rc1 date (11th Oct).
>> We have an ABI change in this patch series https://patches.dpdk.org/project/dpdk/patch/20241010133111.2764712-3-nsaxena@marvell.com/
>> Could you help merge this patch series in rc2? Otherwise it has to wait for the next LTS.
>
> I just read through the series, and I am not confident about this
> addition. It requires a lot of changes in the node code to support it,
> whereas this should be something handled in, or facilitated by, the
> graph library itself.

As far as I can tell, it will be very complicated (if not impossible) to 
determine in a generic manner whether a packet must be steered towards 
a sub tree or not. The decision *must* come from the originating node in 
some way or another.

> I have not heard much from Robin or Christophe, who have been writing
> more node code than me.
> I would prefer to have their opinion before going forward.

This series is indeed very dense. I like the concept of having 
extensible sub trees in the graph but it feels like the implementation 
is more complex than it should be.

Lacking another solution, we went for a naive approach in grout. 
Basically, some nodes have undefined next nodes which are extended using 
a dedicated API.

https://github.com/DPDK/grout/blob/v0.2/modules/infra/datapath/eth_input.c#L23-L31

This API can be used by other nodes to attach themselves to these 
extensible nodes:

https://github.com/DPDK/grout/blob/v0.2/modules/ip/datapath/arp_input.c#L143
https://github.com/DPDK/grout/blob/v0.2/modules/ip/datapath/ip_input.c#L124
https://github.com/DPDK/grout/blob/v0.2/modules/ip6/datapath/ip6_input.c#L122

After which, the extensible nodes can steer the packets towards the 
correct downstream edge based on the dedicated classifier field:

https://github.com/DPDK/grout/blob/v0.2/modules/infra/datapath/eth_input.c#L79

Obviously, this does not natively support a per-interface sub tree 
traversal, but it can be done in the originating node based on packet 
private context data.
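
To make the pattern concrete, here is a condensed sketch of such an 
extensible node using only public rte_graph calls. The helper 
eth_input_add_type(), the l3_edges classifier table and the default 
"pkt_drop" edge are illustrative assumptions, not grout's actual code:

	#include <stdint.h>
	#include <rte_byteorder.h>
	#include <rte_ether.h>
	#include <rte_graph.h>
	#include <rte_graph_worker.h>
	#include <rte_mbuf.h>

	/* Classifier field: ether type -> edge; 0 (pkt_drop) when unset. */
	static rte_edge_t l3_edges[UINT16_MAX + 1];

	static uint16_t
	eth_input_process(struct rte_graph *graph, struct rte_node *node,
			void **objs, uint16_t nb_objs)
	{
		uint16_t i;

		for (i = 0; i < nb_objs; i++) {
			struct rte_mbuf *m = objs[i];
			const struct rte_ether_hdr *eth =
				rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);

			/* Steer towards the edge picked by the attached
			 * protocol; unknown ether types fall into edge 0. */
			rte_node_enqueue_x1(graph, node,
					l3_edges[rte_be_to_cpu_16(eth->ether_type)], m);
		}
		return nb_objs;
	}

	/* Only one static edge; the rest are attached at init time. */
	static struct rte_node_register eth_input_node = {
		.name = "eth_input",
		.process = eth_input_process,
		.nb_edges = 1,
		.next_nodes = { "pkt_drop" },
	};
	RTE_NODE_REGISTER(eth_input_node);

	/* Dedicated extension API, called by e.g. ip_input, ip6_input or
	 * arp_input after node registration but before graph creation. */
	int
	eth_input_add_type(uint16_t eth_type, const char *node_name)
	{
		if (rte_node_edge_update(eth_input_node.id, RTE_EDGE_ID_ANY,
				&node_name, 1) == 0)
			return -1;
		l3_edges[eth_type] = rte_node_edge_count(eth_input_node.id) - 1;
		return 0;
	}

An IPv4 module, for instance, would call eth_input_add_type(0x0800, 
"ip_input") from its init code, so eth_input never has to know the 
set of protocols at compile time.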

This raises a more important question: how can we standardize the way 
private application data is passed from node to node? And how could we 
enforce this declaratively in the node register API?

Do you think we could find some middle ground that would not require 
such extensive changes?

Cheers,
Robin


^ permalink raw reply	[relevance 0%]

* [PATCH v2 1/4] bitops: fix build for GCC without experimental API
  @ 2024-10-16 11:38  3%   ` David Marchand
  0 siblings, 0 replies; 169+ results
From: David Marchand @ 2024-10-16 11:38 UTC (permalink / raw)
  To: dev
  Cc: thomas, bruce.richardson, ktraynor, Morten Brørup,
	Jack Bond-Preston, Tyler Retzlaff, Mattias Rönnblom

Building OVS against current DPDK fails with the following warnings:

In file included from .../ovs/dpdk-dir/include/rte_memory.h:18,
                 from .../ovs/dpdk-dir/include/rte_ring_core.h:29,
                 from .../ovs/dpdk-dir/include/rte_ring.h:37,
                 from .../ovs/dpdk-dir/include/rte_mempool.h:49,
                 from .../ovs/dpdk-dir/include/rte_mbuf.h:38,
                 from lib/dp-packet.h:25,
                 from lib/ofp-packet.c:20:
.../ovs/dpdk-dir/include/rte_bitops.h: In function ‘__rte_bit_assign32’:
.../ovs/dpdk-dir/include/rte_bitops.h:528:1: error:
	‘__rte_bit_set32’ is deprecated: Symbol is not yet part of
	stable ABI [-Werror=deprecated-declarations]
...

This comes from the fact that some (experimental) inline helpers
are calling other experimental API.
Hide those calls.
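
As a consumer-side illustration (an addition for this note, not part
of the patch): an application that does not opt in to the experimental
API can now include the header cleanly, while one that opts in keeps
the helpers:

	#include <stdint.h>

	/* Normally passed by the build system as -DALLOW_EXPERIMENTAL_API. */
	#define ALLOW_EXPERIMENTAL_API 1
	#include <rte_bitops.h>

	int main(void)
	{
		uint32_t word = 0;

		rte_bit_set(&word, 3); /* experimental generic overload */
		return rte_bit_test(&word, 3) ? 0 : 1;
	}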

Fixes: 471de107ae23 ("bitops: add new bit manipulation API")

Reported-by: Kevin Traynor <ktraynor@redhat.com>
Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/eal/include/rte_bitops.h | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lib/eal/include/rte_bitops.h b/lib/eal/include/rte_bitops.h
index e08e41199a..deb1fd43f2 100644
--- a/lib/eal/include/rte_bitops.h
+++ b/lib/eal/include/rte_bitops.h
@@ -525,8 +525,10 @@ __rte_bit_ ## variant ## flip ## size(qualifier uint ## size ## _t *addr, unsign
 	__RTE_GEN_BIT_OPS(,, size) \
 	__RTE_GEN_BIT_OPS(v_, volatile, size)
 
+#ifdef ALLOW_EXPERIMENTAL_API
 __RTE_GEN_BIT_OPS_SIZE(32)
 __RTE_GEN_BIT_OPS_SIZE(64)
+#endif
 
 #define __RTE_GEN_BIT_ATOMIC_TEST(variant, qualifier, size) \
 __rte_experimental \
@@ -651,8 +653,10 @@ __rte_bit_atomic_ ## variant ## test_and_assign ## size( \
 	__RTE_GEN_BIT_ATOMIC_OPS(,, size) \
 	__RTE_GEN_BIT_ATOMIC_OPS(v_, volatile, size)
 
+#ifdef ALLOW_EXPERIMENTAL_API
 __RTE_GEN_BIT_ATOMIC_OPS_SIZE(32)
 __RTE_GEN_BIT_ATOMIC_OPS_SIZE(64)
+#endif
 
 /*------------------------ 32-bit relaxed operations ------------------------*/
 
@@ -1481,6 +1485,7 @@ rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_nam
 	__RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name, \
 		arg2_type, arg2_name, arg3_type, arg3_name)
 
+#ifdef ALLOW_EXPERIMENTAL_API
 __RTE_BIT_OVERLOAD_2R(, test, const, bool, unsigned int, nr)
 __RTE_BIT_OVERLOAD_2(, set,, unsigned int, nr)
 __RTE_BIT_OVERLOAD_2(, clear,, unsigned int, nr)
@@ -1496,6 +1501,7 @@ __RTE_BIT_OVERLOAD_3R(atomic_, test_and_set,, bool, unsigned int, nr, int, memor
 __RTE_BIT_OVERLOAD_3R(atomic_, test_and_clear,, bool, unsigned int, nr, int, memory_order)
 __RTE_BIT_OVERLOAD_4R(atomic_, test_and_assign,, bool, unsigned int, nr, bool, value,
 	int, memory_order)
+#endif
 
 #endif
 
-- 
2.46.2


^ permalink raw reply	[relevance 3%]

* Re: [RFC v2 10/10] eventdev: remove single event enqueue and dequeue
  2024-10-15 18:25  7%   ` [RFC v2 10/10] eventdev: remove single event " Mattias Rönnblom
  2024-10-15 22:00  0%     ` Stephen Hemminger
@ 2024-10-16 14:14  3%     ` Jerin Jacob
  1 sibling, 0 replies; 169+ results
From: Jerin Jacob @ 2024-10-16 14:14 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Jerin Jacob, dev, Mattias Rönnblom, David Marchand,
	Stephen Hemminger, Anoob Joseph, Hemant Agrawal, Sachin Saxena,
	Abdullah Sevincer, Pavan Nikhilesh, Shijith Thotton,
	Harry van Haaren

On Wed, Oct 16, 2024 at 12:14 AM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> Remove the single event enqueue and dequeue, since they did not
> provide any noticeable performance benefits.
>
> This is a change of the ABI, previously announced as a deprecation
> notice. These functions were not directly called by the application,
> so the API remains unaffected.
>
> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
> ---
>  doc/guides/rel_notes/deprecation.rst |  6 +-----
>  lib/eventdev/eventdev_pmd.h          |  4 ----
>  lib/eventdev/eventdev_private.c      | 22 ----------------------
>  lib/eventdev/rte_eventdev.h          | 21 ++++-----------------
>  lib/eventdev/rte_eventdev_core.h     |  4 ----

Update “ABI Changes” section in doc/guides/rel_notes/release_24_11.rst
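
For context, the application-facing pattern is unchanged: single
events already go through the burst API, and the removed symbols were
only the internal per-device single-op callbacks it could dispatch to.
A short sketch using the standard eventdev API (nothing here is
patch-specific):

	#include <rte_eventdev.h>

	static int
	send_one(uint8_t dev_id, uint8_t port_id, struct rte_event *ev)
	{
		/* A burst of one; after the patch this always takes the
		 * burst callback instead of a dedicated single-op path. */
		return rte_event_enqueue_burst(dev_id, port_id, ev, 1) == 1 ? 0 : -1;
	}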

^ permalink raw reply	[relevance 3%]

* [PATCH v26 15/15] doc: add release note about log library
  @ 2024-10-16 20:20  4%   ` Stephen Hemminger
  0 siblings, 0 replies; 169+ results
From: Stephen Hemminger @ 2024-10-16 20:20 UTC (permalink / raw)
  To: dev
  Cc: Stephen Hemminger, Morten Brørup, Bruce Richardson, Chengwen Feng

Significant enough to add some documentation.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
---
 doc/guides/rel_notes/release_24_11.rst | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index acc512c70a..5c8a0af1d0 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -276,6 +276,32 @@ API Changes
   and replaced it with a new shared devarg ``llq_policy`` that keeps the same logic.
 
 
+* **Logging library changes**
+
+  * The log is initialized earlier in startup so all messages go through
+    the library.
+
+  * Added a new option to timestamp log messages, which is useful for
+    debugging delays in application and driver startup.
+
+  * Syslog related changes
+
+    * The meaning of the *--syslog* option has changed:
+      it now controls whether syslog is used at all.
+      The default is now *auto*, which uses syslog only if stderr
+      is not a terminal device.
+
+    * The syslog facility is now set to **LOG_USER** if stderr is a terminal
+      and **LOG_DAEMON** otherwise.
+
+    * Syslog is now supported on FreeBSD (but not on Windows).
+
+  * If the application is a systemd service and the log output is being
+    sent to standard error, then DPDK will switch to the journal native protocol.
+
+  * Log messages can be optionally marked with colors.
+
+
 ABI Changes
 -----------
 
-- 
2.45.2
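
As a rough illustration of the options described above (the spellings
--syslog and --log-timestamp are taken from this series and should be
treated as assumptions):

	#include <rte_eal.h>

	int main(void)
	{
		char a0[] = "app";
		char a1[] = "--syslog=always";  /* always log via syslog */
		char a2[] = "--log-timestamp";  /* prefix messages with a timestamp */
		char *argv[] = { a0, a1, a2 };

		return rte_eal_init(3, argv) < 0 ? 1 : 0;
	}

With no options, the new default --syslog=auto uses syslog only when
stderr is not a terminal.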


^ permalink raw reply	[relevance 4%]

* [PATCH 5/6] net/nfp: reformat the period of logs
  @ 2024-10-17  6:15  3% ` Chaoyong He
  0 siblings, 0 replies; 169+ results
From: Chaoyong He @ 2024-10-17  6:15 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Zerun Fu, Chaoyong He, Long Wu, Peng Zhang

From: Zerun Fu <zerun.fu@corigine.com>

Unify the log messages by adding a period at the end of each sentence.

Signed-off-by: Zerun Fu <zerun.fu@corigine.com>
Reviewed-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
---
 drivers/common/nfp/nfp_common.c               |  12 +-
 drivers/common/nfp/nfp_common_pci.c           |   4 +-
 drivers/net/nfp/flower/nfp_conntrack.c        | 106 +++++------
 drivers/net/nfp/flower/nfp_flower.c           |  68 +++----
 drivers/net/nfp/flower/nfp_flower_cmsg.c      |  26 +--
 drivers/net/nfp/flower/nfp_flower_ctrl.c      |  20 +-
 drivers/net/nfp/flower/nfp_flower_flow.c      | 136 +++++++-------
 .../net/nfp/flower/nfp_flower_representor.c   |  42 ++---
 drivers/net/nfp/flower/nfp_flower_service.c   |   6 +-
 drivers/net/nfp/nfd3/nfp_nfd3_dp.c            |  18 +-
 drivers/net/nfp/nfdk/nfp_nfdk_dp.c            |  18 +-
 drivers/net/nfp/nfdk/nfp_nfdk_vec_avx2_dp.c   |   4 +-
 drivers/net/nfp/nfp_cpp_bridge.c              |  62 +++----
 drivers/net/nfp/nfp_ethdev.c                  | 172 +++++++++---------
 drivers/net/nfp/nfp_ethdev_vf.c               |  16 +-
 drivers/net/nfp/nfp_ipsec.c                   |  42 ++---
 drivers/net/nfp/nfp_mtr.c                     |  86 ++++-----
 drivers/net/nfp/nfp_net_cmsg.c                |   8 +-
 drivers/net/nfp/nfp_net_common.c              |  88 ++++-----
 drivers/net/nfp/nfp_net_ctrl.c                |  14 +-
 drivers/net/nfp/nfp_net_flow.c                |  30 +--
 drivers/net/nfp/nfp_net_meta.c                |   2 +-
 drivers/net/nfp/nfp_rxtx.c                    |  34 ++--
 drivers/net/nfp/nfp_rxtx_vec_avx2.c           |  10 +-
 drivers/net/nfp/nfp_service.c                 |   8 +-
 drivers/net/nfp/nfpcore/nfp6000_pcie.c        |  12 +-
 drivers/net/nfp/nfpcore/nfp_cppcore.c         |  22 +--
 drivers/net/nfp/nfpcore/nfp_elf.c             |   2 +-
 drivers/net/nfp/nfpcore/nfp_hwinfo.c          |  16 +-
 drivers/net/nfp/nfpcore/nfp_mip.c             |   8 +-
 drivers/net/nfp/nfpcore/nfp_nffw.c            |   4 +-
 drivers/net/nfp/nfpcore/nfp_nsp.c             |  42 ++---
 drivers/net/nfp/nfpcore/nfp_nsp_cmds.c        |   2 +-
 drivers/net/nfp/nfpcore/nfp_nsp_eth.c         |  14 +-
 drivers/net/nfp/nfpcore/nfp_resource.c        |  14 +-
 drivers/net/nfp/nfpcore/nfp_rtsym.c           |  40 ++--
 drivers/net/nfp/nfpcore/nfp_sync.c            |   8 +-
 drivers/vdpa/nfp/nfp_vdpa.c                   |  42 ++---
 drivers/vdpa/nfp/nfp_vdpa_core.c              |   2 +-
 39 files changed, 630 insertions(+), 630 deletions(-)

diff --git a/drivers/common/nfp/nfp_common.c b/drivers/common/nfp/nfp_common.c
index 8cfcda7d00..0df8332dfb 100644
--- a/drivers/common/nfp/nfp_common.c
+++ b/drivers/common/nfp/nfp_common.c
@@ -25,7 +25,7 @@ nfp_reconfig_real(struct nfp_hw *hw,
 			hw->qcp_cfg);
 
 	if (hw->qcp_cfg == NULL) {
-		PMD_DRV_LOG(ERR, "Bad configuration queue pointer");
+		PMD_DRV_LOG(ERR, "Bad configuration queue pointer.");
 		return -ENXIO;
 	}
 
@@ -43,12 +43,12 @@ nfp_reconfig_real(struct nfp_hw *hw,
 			break;
 
 		if ((new & NFP_NET_CFG_UPDATE_ERR) != 0) {
-			PMD_DRV_LOG(ERR, "Reconfig error: %#08x", new);
+			PMD_DRV_LOG(ERR, "Reconfig error: %#08x.", new);
 			return -1;
 		}
 
 		if (cnt >= NFP_NET_POLL_TIMEOUT) {
-			PMD_DRV_LOG(ERR, "Reconfig timeout for %#08x after %u ms",
+			PMD_DRV_LOG(ERR, "Reconfig timeout for %#08x after %u ms.",
 					update, cnt);
 			return -EIO;
 		}
@@ -56,7 +56,7 @@ nfp_reconfig_real(struct nfp_hw *hw,
 		nanosleep(&wait, 0); /* waiting for a 1ms */
 	}
 
-	PMD_DRV_LOG(DEBUG, "Ack DONE");
+	PMD_DRV_LOG(DEBUG, "Ack DONE.");
 	return 0;
 }
 
@@ -96,7 +96,7 @@ nfp_reconfig(struct nfp_hw *hw,
 	rte_spinlock_unlock(&hw->reconfig_lock);
 
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Error NFP reconfig: ctrl=%#08x update=%#08x",
+		PMD_DRV_LOG(ERR, "Error NFP reconfig: ctrl=%#08x update=%#08x.",
 				ctrl, update);
 		return -EIO;
 	}
@@ -140,7 +140,7 @@ nfp_ext_reconfig(struct nfp_hw *hw,
 	rte_spinlock_unlock(&hw->reconfig_lock);
 
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Error NFP ext reconfig: ctrl_ext=%#08x update=%#08x",
+		PMD_DRV_LOG(ERR, "Error NFP ext reconfig: ctrl_ext=%#08x update=%#08x.",
 				ctrl_ext, update);
 		return -EIO;
 	}
diff --git a/drivers/common/nfp/nfp_common_pci.c b/drivers/common/nfp/nfp_common_pci.c
index 1a4d3f91b4..856f3917a2 100644
--- a/drivers/common/nfp/nfp_common_pci.c
+++ b/drivers/common/nfp/nfp_common_pci.c
@@ -191,7 +191,7 @@ nfp_drivers_probe(struct rte_pci_device *pci_dev,
 
 		ret = driver->probe(pci_dev);
 		if (ret < 0) {
-			PMD_DRV_LOG(ERR, "Failed to load driver %s", driver->name);
+			PMD_DRV_LOG(ERR, "Failed to load driver %s.", driver->name);
 			return ret;
 		}
 	}
@@ -210,7 +210,7 @@ nfp_common_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 	class = nfp_parse_class_options(eal_dev->devargs);
 	if (class == NFP_CLASS_INVALID) {
-		PMD_DRV_LOG(ERR, "Unsupported nfp class type: %s",
+		PMD_DRV_LOG(ERR, "Unsupported nfp class type: %s.",
 				eal_dev->devargs->args);
 		return -ENOTSUP;
 	}
diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c
index ba9134eb5e..0b21e4ee5b 100644
--- a/drivers/net/nfp/flower/nfp_conntrack.c
+++ b/drivers/net/nfp/flower/nfp_conntrack.c
@@ -133,7 +133,7 @@ nfp_ct_merge_table_search(struct nfp_ct_zone_entry *ze,
 	hash_key = rte_jhash(hash_data, hash_len, ze->priv->hash_seed);
 	index = rte_hash_lookup_data(ze->ct_merge_table, &hash_key, (void **)&m_ent);
 	if (index < 0) {
-		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_merge table");
+		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_merge table.");
 		return NULL;
 	}
 
@@ -150,7 +150,7 @@ nfp_ct_merge_table_add(struct nfp_ct_zone_entry *ze,
 	hash_key = rte_jhash(merge_entry, sizeof(uint64_t) * 2, ze->priv->hash_seed);
 	ret = rte_hash_add_key_data(ze->ct_merge_table, &hash_key, merge_entry);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Add to ct_merge table failed");
+		PMD_DRV_LOG(ERR, "Add to ct_merge table failed.");
 		return false;
 	}
 
@@ -167,7 +167,7 @@ nfp_ct_merge_table_delete(struct nfp_ct_zone_entry *ze,
 	hash_key = rte_jhash(m_ent, sizeof(uint64_t) * 2, ze->priv->hash_seed);
 	ret = rte_hash_del_key(ze->ct_merge_table, &hash_key);
 	if (ret < 0)
-		PMD_DRV_LOG(ERR, "Delete from ct_merge table failed, ret=%d", ret);
+		PMD_DRV_LOG(ERR, "Delete from ct_merge table failed, ret=%d.", ret);
 }
 
 static void
@@ -197,7 +197,7 @@ nfp_ct_map_table_search(struct nfp_flow_priv *priv,
 	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
 	index = rte_hash_lookup_data(priv->ct_map_table, &hash_key, (void **)&me);
 	if (index < 0) {
-		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_map table");
+		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_map table.");
 		return NULL;
 	}
 
@@ -214,7 +214,7 @@ nfp_ct_map_table_add(struct nfp_flow_priv *priv,
 	hash_key = rte_jhash(me, sizeof(uint64_t), priv->hash_seed);
 	ret = rte_hash_add_key_data(priv->ct_map_table, &hash_key, me);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Add to ct_map table failed");
+		PMD_DRV_LOG(ERR, "Add to ct_map table failed.");
 		return false;
 	}
 
@@ -231,7 +231,7 @@ nfp_ct_map_table_delete(struct nfp_flow_priv *priv,
 	hash_key = rte_jhash(me, sizeof(uint64_t), priv->hash_seed);
 	ret = rte_hash_del_key(priv->ct_map_table, &hash_key);
 	if (ret < 0)
-		PMD_DRV_LOG(ERR, "Delete form ct_map table failed");
+		PMD_DRV_LOG(ERR, "Delete from ct_map table failed.");
 }
 
 static void
@@ -331,7 +331,7 @@ nfp_flow_item_conf_size_get(enum rte_flow_item_type type,
 		len = sizeof(struct rte_flow_item_geneve);
 		break;
 	default:
-		PMD_DRV_LOG(ERR, "Unsupported item type: %d", type);
+		PMD_DRV_LOG(ERR, "Unsupported item type: %d.", type);
 		*size = 0;
 		return false;
 	}
@@ -351,13 +351,13 @@ nfp_ct_flow_item_copy_real(const void *src,
 
 	ret = nfp_flow_item_conf_size_get(type, &len);
 	if (!ret) {
-		PMD_DRV_LOG(ERR, "Get flow item conf size failed");
+		PMD_DRV_LOG(ERR, "Get flow item conf size failed.");
 		return NULL;
 	}
 
 	dst = rte_zmalloc("flow_item", len, 0);
 	if (dst == NULL) {
-		PMD_DRV_LOG(ERR, "Malloc memory for ct item failed");
+		PMD_DRV_LOG(ERR, "Malloc memory for ct item failed.");
 		return NULL;
 	}
 
@@ -375,7 +375,7 @@ nfp_ct_flow_item_copy(const struct rte_flow_item *src,
 	if (src->spec != NULL) {
 		dst->spec = nfp_ct_flow_item_copy_real(src->spec, src->type);
 		if (dst->spec == NULL) {
-			PMD_DRV_LOG(ERR, "Copy spec of ct item failed");
+			PMD_DRV_LOG(ERR, "Copy spec of ct item failed.");
 			goto end;
 		}
 	}
@@ -383,7 +383,7 @@ nfp_ct_flow_item_copy(const struct rte_flow_item *src,
 	if (src->mask != NULL) {
 		dst->mask = nfp_ct_flow_item_copy_real(src->mask, src->type);
 		if (dst->mask == NULL) {
-			PMD_DRV_LOG(ERR, "Copy mask of ct item failed");
+			PMD_DRV_LOG(ERR, "Copy mask of ct item failed.");
 			goto free_spec;
 		}
 	}
@@ -391,7 +391,7 @@ nfp_ct_flow_item_copy(const struct rte_flow_item *src,
 	if (src->last != NULL) {
 		dst->last = nfp_ct_flow_item_copy_real(src->last, src->type);
 		if (dst->last == NULL) {
-			PMD_DRV_LOG(ERR, "Copy last of ct item failed");
+			PMD_DRV_LOG(ERR, "Copy last of ct item failed.");
 			goto free_mask;
 		}
 	}
@@ -417,7 +417,7 @@ nfp_ct_flow_items_copy(const struct rte_flow_item *src,
 	for (loop = 0; loop < item_cnt; ++loop) {
 		ret = nfp_ct_flow_item_copy(src + loop, dst + loop);
 		if (!ret) {
-			PMD_DRV_LOG(ERR, "Copy ct item failed");
+			PMD_DRV_LOG(ERR, "Copy ct item failed.");
 			nfp_ct_flow_items_free(dst, loop);
 			return false;
 		}
@@ -490,7 +490,7 @@ nfp_ct_flow_action_free(struct rte_flow_action *action)
 		func = nfp_ct_flow_action_free_raw;
 		break;
 	default:
-		PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type);
+		PMD_DRV_LOG(ERR, "Unsupported action type: %d.", action->type);
 		break;
 	}
 
@@ -517,14 +517,14 @@ nfp_ct_flow_action_copy_real(const void *src,
 
 	dst = rte_zmalloc("flow_action", len, 0);
 	if (dst == NULL) {
-		PMD_DRV_LOG(ERR, "Malloc memory for ct action failed");
+		PMD_DRV_LOG(ERR, "Malloc memory for ct action failed.");
 		return NULL;
 	}
 
 	if (func != NULL) {
 		ret = func(src, dst);
 		if (!ret) {
-			PMD_DRV_LOG(ERR, "Copy ct action failed");
+			PMD_DRV_LOG(ERR, "Copy ct action failed.");
 			return NULL;
 		}
 
@@ -559,7 +559,7 @@ nfp_ct_flow_action_copy_raw(const void *src,
 	raw_dst->data = nfp_ct_flow_action_copy_real(raw_src->data,
 			raw_src->size, NULL);
 	if (raw_dst->data == NULL) {
-		PMD_DRV_LOG(ERR, "Copy ct action process failed");
+		PMD_DRV_LOG(ERR, "Copy ct action process failed.");
 		return false;
 	}
 
@@ -625,13 +625,13 @@ nfp_ct_flow_action_copy(const struct rte_flow_action *src,
 		func = nfp_ct_flow_action_copy_raw;
 		break;
 	default:
-		PMD_DRV_LOG(DEBUG, "Unsupported action type: %d", src->type);
+		PMD_DRV_LOG(DEBUG, "Unsupported action type: %d.", src->type);
 		return false;
 	}
 
 	dst->conf = nfp_ct_flow_action_copy_real(src->conf, len, func);
 	if (dst->conf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Copy ct action process failed");
+		PMD_DRV_LOG(DEBUG, "Copy ct action process failed.");
 		return false;
 	}
 
@@ -649,7 +649,7 @@ nfp_ct_flow_actions_copy(const struct rte_flow_action *src,
 	for (loop = 0; loop < action_cnt; ++loop) {
 		ret = nfp_ct_flow_action_copy(src + loop, dst + loop);
 		if (!ret) {
-			PMD_DRV_LOG(DEBUG, "Copy ct action failed");
+			PMD_DRV_LOG(DEBUG, "Copy ct action failed.");
 			nfp_ct_flow_actions_free(dst, loop);
 			return false;
 		}
@@ -676,7 +676,7 @@ nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze,
 
 	fe = rte_zmalloc("ct_flow_entry", sizeof(*fe), 0);
 	if (fe == NULL) {
-		PMD_DRV_LOG(ERR, "Could not alloc ct_flow entry");
+		PMD_DRV_LOG(ERR, "Could not alloc ct_flow entry.");
 		return NULL;
 	}
 
@@ -693,28 +693,28 @@ nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze,
 	fe->rule.items = rte_zmalloc("ct_flow_item",
 			sizeof(struct rte_flow_item) * item_cnt, 0);
 	if (fe->rule.items == NULL) {
-		PMD_DRV_LOG(ERR, "Could not alloc ct flow items");
+		PMD_DRV_LOG(ERR, "Could not alloc ct flow items.");
 		goto free_flow_entry;
 	}
 
 	fe->rule.actions = rte_zmalloc("ct_flow_action",
 			sizeof(struct rte_flow_action) * action_cnt, 0);
 	if (fe->rule.actions == NULL) {
-		PMD_DRV_LOG(ERR, "Could not alloc ct flow actions");
+		PMD_DRV_LOG(ERR, "Could not alloc ct flow actions.");
 		goto free_flow_item;
 	}
 
 	/* Deep copy of items */
 	ret = nfp_ct_flow_items_copy(items, fe->rule.items, item_cnt);
 	if (!ret) {
-		PMD_DRV_LOG(ERR, "Could not deep copy ct flow items");
+		PMD_DRV_LOG(ERR, "Could not deep copy ct flow items.");
 		goto free_flow_action;
 	}
 
 	/* Deep copy of actions */
 	ret = nfp_ct_flow_actions_copy(actions, fe->rule.actions, action_cnt);
 	if (!ret) {
-		PMD_DRV_LOG(ERR, "Could not deep copy ct flow actions");
+		PMD_DRV_LOG(ERR, "Could not deep copy ct flow actions.");
 		goto free_copied_items;
 	}
 
@@ -724,7 +724,7 @@ nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze,
 	/* Now add a ct map entry */
 	me = rte_zmalloc("ct_map_entry", sizeof(*me), 0);
 	if (me == NULL) {
-		PMD_DRV_LOG(ERR, "Malloc memory for ct map entry failed");
+		PMD_DRV_LOG(ERR, "Malloc memory for ct map entry failed.");
 		goto free_copied_actions;
 	}
 
@@ -735,7 +735,7 @@ nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze,
 	priv = repr->app_fw_flower->flow_priv;
 	ret = nfp_ct_map_table_add(priv, me);
 	if (!ret) {
-		PMD_DRV_LOG(ERR, "Add into ct map table failed");
+		PMD_DRV_LOG(ERR, "Add into ct map table failed.");
 		goto free_map_entry;
 	}
 
@@ -818,7 +818,7 @@ nfp_ct_zone_table_search(struct nfp_flow_priv *priv,
 	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
 	index = rte_hash_lookup_data(priv->ct_zone_table, &hash_key, (void **)&ze);
 	if (index < 0) {
-		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_zone table");
+		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_zone table.");
 		return NULL;
 	}
 
@@ -835,7 +835,7 @@ nfp_ct_zone_table_add(struct nfp_flow_priv *priv,
 	hash_key = rte_jhash(ze, sizeof(uint32_t), priv->hash_seed);
 	ret = rte_hash_add_key_data(priv->ct_zone_table, &hash_key, ze);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Add to the ct_zone table failed");
+		PMD_DRV_LOG(ERR, "Add to the ct_zone table failed.");
 		return false;
 	}
 
@@ -852,7 +852,7 @@ nfp_ct_zone_table_delete(struct nfp_flow_priv *priv,
 	hash_key = rte_jhash(ze, sizeof(uint32_t), priv->hash_seed);
 	ret = rte_hash_del_key(priv->ct_zone_table, &hash_key);
 	if (ret < 0)
-		PMD_DRV_LOG(ERR, "Delete from the ct_zone table failed");
+		PMD_DRV_LOG(ERR, "Delete from the ct_zone table failed.");
 }
 
 static bool
@@ -880,7 +880,7 @@ nfp_ct_zone_entry_init(struct nfp_ct_zone_entry *ze,
 	ct_merge_hash_params.hash_func_init_val = priv->hash_seed;
 	ze->ct_merge_table = rte_hash_create(&ct_merge_hash_params);
 	if (ze->ct_merge_table == NULL) {
-		PMD_DRV_LOG(ERR, "CT merge table creation failed");
+		PMD_DRV_LOG(ERR, "CT merge table creation failed.");
 		return false;
 	}
 
@@ -925,13 +925,13 @@ nfp_ct_zone_entry_get(struct nfp_flow_priv *priv,
 
 		ze = rte_zmalloc("ct_zone_wc", sizeof(*ze), 0);
 		if (ze == NULL) {
-			PMD_DRV_LOG(ERR, "Could not alloc ct_zone_wc entry");
+			PMD_DRV_LOG(ERR, "Could not alloc ct_zone_wc entry.");
 			return NULL;
 		}
 
 		is_ok = nfp_ct_zone_entry_init(ze, priv, zone, true);
 		if (!is_ok) {
-			PMD_DRV_LOG(ERR, "Init ct zone wc entry failed");
+			PMD_DRV_LOG(ERR, "Init ct zone wc entry failed.");
 			goto free_ct_zone_entry;
 		}
 
@@ -943,19 +943,19 @@ nfp_ct_zone_entry_get(struct nfp_flow_priv *priv,
 
 		ze = rte_zmalloc("ct_zone_entry", sizeof(*ze), 0);
 		if (ze == NULL) {
-			PMD_DRV_LOG(ERR, "Could not alloc ct_zone entry");
+			PMD_DRV_LOG(ERR, "Could not alloc ct_zone entry.");
 			return NULL;
 		}
 
 		is_ok = nfp_ct_zone_entry_init(ze, priv, zone, false);
 		if (!is_ok) {
-			PMD_DRV_LOG(ERR, "Init ct zone entry failed");
+			PMD_DRV_LOG(ERR, "Init ct zone entry failed.");
 			goto free_ct_zone_entry;
 		}
 
 		is_ok = nfp_ct_zone_table_add(priv, ze);
 		if (!is_ok) {
-			PMD_DRV_LOG(ERR, "Add into ct zone table failed");
+			PMD_DRV_LOG(ERR, "Add into ct zone table failed.");
 			goto free_ct_zone_entry;
 		}
 	}
@@ -1046,7 +1046,7 @@ nfp_ct_offload_del(struct rte_eth_dev *dev,
 			if (m_ent->compiled_rule != NULL) {
 				ret = nfp_flow_destroy(dev, m_ent->compiled_rule, error);
 				if (ret != 0) {
-					PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item");
+					PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item.");
 					return -EINVAL;
 				}
 				m_ent->compiled_rule = NULL;
@@ -1062,7 +1062,7 @@ nfp_ct_offload_del(struct rte_eth_dev *dev,
 			if (m_ent->compiled_rule != NULL) {
 				ret = nfp_flow_destroy(dev, m_ent->compiled_rule, error);
 				if (ret != 0) {
-					PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item");
+					PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item.");
 					return -EINVAL;
 				}
 				m_ent->compiled_rule = NULL;
@@ -1467,7 +1467,7 @@ nfp_ct_do_flow_merge(struct nfp_ct_zone_entry *ze,
 
 	merge_entry = rte_zmalloc("ct_merge_entry", sizeof(*merge_entry), 0);
 	if (merge_entry == NULL) {
-		PMD_DRV_LOG(ERR, "Malloc memory for ct merge entry failed");
+		PMD_DRV_LOG(ERR, "Malloc memory for ct merge entry failed.");
 		return false;
 	}
 
@@ -1483,14 +1483,14 @@ nfp_ct_do_flow_merge(struct nfp_ct_zone_entry *ze,
 	merge_entry->rule.items = rte_zmalloc("ct_flow_item",
 			sizeof(struct rte_flow_item) * merge_entry->rule.items_cnt, 0);
 	if (merge_entry->rule.items == NULL) {
-		PMD_DRV_LOG(ERR, "Could not alloc items for merged flow");
+		PMD_DRV_LOG(ERR, "Could not alloc items for merged flow.");
 		goto merge_exit;
 	}
 
 	merge_entry->rule.actions = rte_zmalloc("ct_flow_action",
 			sizeof(struct rte_flow_action) * merge_entry->rule.actions_cnt, 0);
 	if (merge_entry->rule.actions == NULL) {
-		PMD_DRV_LOG(ERR, "Could not alloc actions for merged flow");
+		PMD_DRV_LOG(ERR, "Could not alloc actions for merged flow.");
 		goto free_items;
 	}
 
@@ -1503,14 +1503,14 @@ nfp_ct_do_flow_merge(struct nfp_ct_zone_entry *ze,
 
 	ret = nfp_ct_merge_table_add(ze, merge_entry);
 	if (!ret) {
-		PMD_DRV_LOG(ERR, "Add into ct merge table failed");
+		PMD_DRV_LOG(ERR, "Add into ct merge table failed.");
 		goto free_actions;
 	}
 
 	/* Send to firmware */
 	ret = nfp_ct_offload_add(pre_ct_entry->dev, merge_entry);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Send the merged flow to firmware failed");
+		PMD_DRV_LOG(ERR, "Send the merged flow to firmware failed.");
 		goto merge_table_del;
 	}
 
@@ -1542,7 +1542,7 @@ nfp_ct_merge_flow_entries(struct nfp_ct_flow_entry *fe,
 		LIST_FOREACH(fe_tmp, &ze_src->post_ct_list, post_ct_list) {
 			ret = nfp_ct_do_flow_merge(ze_dst, fe, fe_tmp);
 			if (!ret) {
-				PMD_DRV_LOG(ERR, "Merge for ct pre flow failed");
+				PMD_DRV_LOG(ERR, "Merge for ct pre flow failed.");
 				return false;
 			}
 		}
@@ -1550,7 +1550,7 @@ nfp_ct_merge_flow_entries(struct nfp_ct_flow_entry *fe,
 		LIST_FOREACH(fe_tmp, &ze_src->pre_ct_list, pre_ct_list) {
 			ret = nfp_ct_do_flow_merge(ze_dst, fe_tmp, fe);
 			if (!ret) {
-				PMD_DRV_LOG(ERR, "Merge for ct post flow failed");
+				PMD_DRV_LOG(ERR, "Merge for ct post flow failed.");
 				return false;
 			}
 		}
@@ -1577,14 +1577,14 @@ nfp_flow_handle_pre_ct(const struct rte_flow_item *ct_item,
 	priv = representor->app_fw_flower->flow_priv;
 	ze = nfp_ct_zone_entry_get(priv, ct->ct_zone, false);
 	if (ze == NULL) {
-		PMD_DRV_LOG(ERR, "Could not get ct zone entry");
+		PMD_DRV_LOG(ERR, "Could not get ct zone entry.");
 		return false;
 	}
 
 	/* Add entry to pre_ct_list */
 	fe = nfp_ct_flow_entry_get(ze, dev, items, actions, cookie);
 	if (fe == NULL) {
-		PMD_DRV_LOG(ERR, "Could not get ct flow entry");
+		PMD_DRV_LOG(ERR, "Could not get ct flow entry.");
 		goto ct_zone_entry_free;
 	}
 
@@ -1593,7 +1593,7 @@ nfp_flow_handle_pre_ct(const struct rte_flow_item *ct_item,
 
 	ret = nfp_ct_merge_flow_entries(fe, ze, ze);
 	if (!ret) {
-		PMD_DRV_LOG(ERR, "Merge ct flow entries failed");
+		PMD_DRV_LOG(ERR, "Merge ct flow entries failed.");
 		goto ct_flow_entry_free;
 	}
 
@@ -1601,7 +1601,7 @@ nfp_flow_handle_pre_ct(const struct rte_flow_item *ct_item,
 	if (priv->ct_zone_wc != NULL) {
 		ret = nfp_ct_merge_flow_entries(fe, priv->ct_zone_wc, ze);
 		if (!ret) {
-			PMD_DRV_LOG(ERR, "Merge ct flow entries wildcast failed");
+			PMD_DRV_LOG(ERR, "Merge ct flow entries wildcard failed.");
 			goto ct_flow_entry_free;
 		}
 	}
@@ -1639,7 +1639,7 @@ nfp_flow_handle_post_ct(const struct rte_flow_item *ct_item,
 	if (ct_mask->ct_zone == 0) {
 		wildcard = true;
 	} else if (ct_mask->ct_zone != UINT16_MAX) {
-		PMD_DRV_LOG(ERR, "Partially wildcard ct_zone is not supported");
+		PMD_DRV_LOG(ERR, "Partially wildcard ct_zone is not supported.");
 		return false;
 	}
 
@@ -1647,14 +1647,14 @@ nfp_flow_handle_post_ct(const struct rte_flow_item *ct_item,
 	priv = representor->app_fw_flower->flow_priv;
 	ze = nfp_ct_zone_entry_get(priv, ct->ct_zone, wildcard);
 	if (ze == NULL) {
-		PMD_DRV_LOG(ERR, "Could not get ct zone entry");
+		PMD_DRV_LOG(ERR, "Could not get ct zone entry.");
 		return false;
 	}
 
 	/* Add entry to post_ct_list */
 	fe = nfp_ct_flow_entry_get(ze, dev, items, actions, cookie);
 	if (fe == NULL) {
-		PMD_DRV_LOG(ERR, "Could not get ct flow entry");
+		PMD_DRV_LOG(ERR, "Could not get ct flow entry.");
 		goto ct_zone_entry_free;
 	}
 
@@ -1666,7 +1666,7 @@ nfp_flow_handle_post_ct(const struct rte_flow_item *ct_item,
 			ze = (struct nfp_ct_zone_entry *)next_data;
 			ret = nfp_ct_merge_flow_entries(fe, ze, ze);
 			if (!ret) {
-				PMD_DRV_LOG(ERR, "Merge ct flow entries wildcast failed");
+				PMD_DRV_LOG(ERR, "Merge ct flow entries wildcard failed.");
 				break;
 			}
 		}
diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index d38b077c09..ee4f1a2983 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -71,7 +71,7 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
 	/* If an error when reconfig we avoid to change hw state */
 	ret = nfp_reconfig(hw, new_ctrl, update);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Failed to reconfig PF vnic");
+		PMD_INIT_LOG(ERR, "Failed to reconfig PF vnic.");
 		return -EIO;
 	}
 
@@ -80,7 +80,7 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
 	/* Setup the freelist ring */
 	ret = nfp_net_rx_freelist_setup(dev);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Error with flower PF vNIC freelist setup");
+		PMD_INIT_LOG(ERR, "Error with flower PF vNIC freelist setup.");
 		return -EIO;
 	}
 
@@ -134,12 +134,12 @@ nfp_flower_pf_dispatch_pkts(struct nfp_net_rxq *rxq,
 
 	repr = nfp_flower_get_repr(rxq->hw_priv, port_id);
 	if (repr == NULL) {
-		PMD_RX_LOG(ERR, "Can not get repr for port %u", port_id);
+		PMD_RX_LOG(ERR, "Can not get repr for port %u.", port_id);
 		return false;
 	}
 
 	if (repr->ring == NULL || repr->ring[rxq->qidx] == NULL) {
-		PMD_RX_LOG(ERR, "No ring available for repr_port %s", repr->name);
+		PMD_RX_LOG(ERR, "No ring available for repr_port %s.", repr->name);
 		return false;
 	}
 
@@ -207,7 +207,7 @@ nfp_flower_init_vnic_common(struct nfp_net_hw_priv *hw_priv,
 
 	pf_dev = hw_priv->pf_dev;
 
-	PMD_INIT_LOG(DEBUG, "%s vNIC ctrl bar: %p", vnic_type, hw->super.ctrl_bar);
+	PMD_INIT_LOG(DEBUG, "%s vNIC ctrl bar: %p.", vnic_type, hw->super.ctrl_bar);
 
 	err = nfp_net_common_init(pf_dev, hw);
 	if (err != 0)
@@ -264,7 +264,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
 
 	ret = nfp_flower_init_vnic_common(hw_priv, hw, "ctrl_vnic");
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Could not init pf vnic");
+		PMD_INIT_LOG(ERR, "Could not init pf vnic.");
 		return -EINVAL;
 	}
 
@@ -272,7 +272,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
 	app_fw_flower->ctrl_ethdev = rte_zmalloc("nfp_ctrl_vnic",
 			sizeof(struct rte_eth_dev), RTE_CACHE_LINE_SIZE);
 	if (app_fw_flower->ctrl_ethdev == NULL) {
-		PMD_INIT_LOG(ERR, "Could not allocate ctrl vnic");
+		PMD_INIT_LOG(ERR, "Could not allocate ctrl vnic.");
 		return -ENOMEM;
 	}
 
@@ -283,7 +283,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
 	eth_dev->data = rte_zmalloc("nfp_ctrl_vnic_data",
 			sizeof(struct rte_eth_dev_data), RTE_CACHE_LINE_SIZE);
 	if (eth_dev->data == NULL) {
-		PMD_INIT_LOG(ERR, "Could not allocate ctrl vnic data");
+		PMD_INIT_LOG(ERR, "Could not allocate ctrl vnic data.");
 		ret = -ENOMEM;
 		goto eth_dev_cleanup;
 	}
@@ -298,7 +298,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
 			rte_pktmbuf_pool_create(ctrl_pktmbuf_pool_name,
 			4 * CTRL_VNIC_NB_DESC, 64, 0, 9216, numa_node);
 	if (app_fw_flower->ctrl_pktmbuf_pool == NULL) {
-		PMD_INIT_LOG(ERR, "Create mbuf pool for ctrl vnic failed");
+		PMD_INIT_LOG(ERR, "Create mbuf pool for ctrl vnic failed.");
 		ret = -ENOMEM;
 		goto dev_data_cleanup;
 	}
@@ -312,7 +312,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
 			sizeof(eth_dev->data->rx_queues[0]) * n_rxq,
 			RTE_CACHE_LINE_SIZE);
 	if (eth_dev->data->rx_queues == NULL) {
-		PMD_INIT_LOG(ERR, "The rte_zmalloc failed for ctrl vNIC rx queues");
+		PMD_INIT_LOG(ERR, "The rte_zmalloc failed for ctrl vNIC rx queues.");
 		ret = -ENOMEM;
 		goto mempool_cleanup;
 	}
@@ -321,7 +321,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
 			sizeof(eth_dev->data->tx_queues[0]) * n_txq,
 			RTE_CACHE_LINE_SIZE);
 	if (eth_dev->data->tx_queues == NULL) {
-		PMD_INIT_LOG(ERR, "The rte_zmalloc failed for ctrl vNIC tx queues");
+		PMD_INIT_LOG(ERR, "The rte_zmalloc failed for ctrl vNIC tx queues.");
 		ret = -ENOMEM;
 		goto rx_queue_free;
 	}
@@ -339,7 +339,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
 				sizeof(struct nfp_net_rxq), RTE_CACHE_LINE_SIZE,
 				numa_node);
 		if (rxq == NULL) {
-			PMD_DRV_LOG(ERR, "Error allocating rxq");
+			PMD_DRV_LOG(ERR, "Error allocating rxq.");
 			ret = -ENOMEM;
 			goto rx_queue_setup_cleanup;
 		}
@@ -373,7 +373,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
 				hw_priv->dev_info->max_qc_size,
 				NFP_MEMZONE_ALIGN, numa_node);
 		if (tz == NULL) {
-			PMD_DRV_LOG(ERR, "Error allocating rx dma");
+			PMD_DRV_LOG(ERR, "Error allocating rx dma.");
 			rte_free(rxq);
 			ret = -ENOMEM;
 			goto rx_queue_setup_cleanup;
@@ -414,7 +414,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
 				sizeof(struct nfp_net_txq), RTE_CACHE_LINE_SIZE,
 				numa_node);
 		if (txq == NULL) {
-			PMD_DRV_LOG(ERR, "Error allocating txq");
+			PMD_DRV_LOG(ERR, "Error allocating txq.");
 			ret = -ENOMEM;
 			goto tx_queue_setup_cleanup;
 		}
@@ -431,7 +431,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
 				hw_priv->dev_info->max_qc_size,
 				NFP_MEMZONE_ALIGN, numa_node);
 		if (tz == NULL) {
-			PMD_DRV_LOG(ERR, "Error allocating tx dma");
+			PMD_DRV_LOG(ERR, "Error allocating tx dma.");
 			rte_free(txq);
 			ret = -ENOMEM;
 			goto tx_queue_setup_cleanup;
@@ -476,7 +476,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
 	/* Alloc sync memory zone */
 	ret = nfp_flower_service_sync_alloc(hw_priv);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Alloc sync memory zone failed");
+		PMD_INIT_LOG(ERR, "Alloc sync memory zone failed.");
 		goto tx_queue_setup_cleanup;
 	}
 
@@ -593,7 +593,7 @@ nfp_flower_start_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower)
 	/* If an error when reconfig we avoid to change hw state */
 	ret = nfp_reconfig(hw, new_ctrl, update);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Failed to reconfig ctrl vnic");
+		PMD_INIT_LOG(ERR, "Failed to reconfig ctrl vnic.");
 		return -EIO;
 	}
 
@@ -602,7 +602,7 @@ nfp_flower_start_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower)
 	/* Setup the freelist ring */
 	ret = nfp_net_rx_freelist_setup(dev);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Error with flower ctrl vNIC freelist setup");
+		PMD_INIT_LOG(ERR, "Error with flower ctrl vNIC freelist setup.");
 		return -EIO;
 	}
 
@@ -662,7 +662,7 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
 	app_fw_flower = rte_zmalloc_socket("nfp_app_fw_flower", sizeof(*app_fw_flower),
 			RTE_CACHE_LINE_SIZE, numa_node);
 	if (app_fw_flower == NULL) {
-		PMD_INIT_LOG(ERR, "Could not malloc app fw flower");
+		PMD_INIT_LOG(ERR, "Could not malloc app fw flower.");
 		return -ENOMEM;
 	}
 
@@ -670,13 +670,13 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
 
 	ret = nfp_flow_priv_init(pf_dev);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Init flow priv failed");
+		PMD_INIT_LOG(ERR, "Init flow priv failed.");
 		goto app_cleanup;
 	}
 
 	ret = nfp_mtr_priv_init(pf_dev);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Error initializing metering private data");
+		PMD_INIT_LOG(ERR, "Error initializing metering private data.");
 		goto flow_priv_cleanup;
 	}
 
@@ -684,7 +684,7 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
 	pf_hw = rte_zmalloc_socket("nfp_pf_vnic", 2 * sizeof(struct nfp_net_hw),
 			RTE_CACHE_LINE_SIZE, numa_node);
 	if (pf_hw == NULL) {
-		PMD_INIT_LOG(ERR, "Could not malloc nfp pf vnic");
+		PMD_INIT_LOG(ERR, "Could not malloc nfp pf vnic.");
 		ret = -ENOMEM;
 		goto mtr_priv_cleanup;
 	}
@@ -694,7 +694,7 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
 	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name,
 			pf_dev->ctrl_bar_size, &pf_dev->ctrl_area);
 	if (pf_dev->ctrl_bar == NULL) {
-		PMD_INIT_LOG(ERR, "Cloud not map the PF vNIC ctrl bar");
+		PMD_INIT_LOG(ERR, "Could not map the PF vNIC ctrl bar.");
 		ret = -ENODEV;
 		goto vnic_cleanup;
 	}
@@ -703,7 +703,7 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
 	ext_features = nfp_rtsym_read_le(pf_dev->sym_tbl, "_abi_flower_extra_features",
 			&err);
 	if (err != 0) {
-		PMD_INIT_LOG(ERR, "Could not read extra features from fw");
+		PMD_INIT_LOG(ERR, "Could not read extra features from fw.");
 		ret = -EIO;
 		goto pf_cpp_area_cleanup;
 	}
@@ -718,13 +718,13 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
 
 	ret = nfp_flower_init_vnic_common(hw_priv, pf_hw, "pf_vnic");
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Could not initialize flower PF vNIC");
+		PMD_INIT_LOG(ERR, "Could not initialize flower PF vNIC.");
 		goto pf_cpp_area_cleanup;
 	}
 
 	ret = nfp_net_vf_config_app_init(pf_hw, pf_dev);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Failed to init sriov module");
+		PMD_INIT_LOG(ERR, "Failed to init sriov module.");
 		goto pf_cpp_area_cleanup;
 	}
 
@@ -739,35 +739,35 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
 	ctrl_hw->super.ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, ctrl_name,
 			pf_dev->ctrl_bar_size, &ctrl_hw->ctrl_area);
 	if (ctrl_hw->super.ctrl_bar == NULL) {
-		PMD_INIT_LOG(ERR, "Cloud not map the ctrl vNIC ctrl bar");
+		PMD_INIT_LOG(ERR, "Could not map the ctrl vNIC ctrl bar.");
 		ret = -ENODEV;
 		goto pf_cpp_area_cleanup;
 	}
 
 	ret = nfp_flower_init_ctrl_vnic(app_fw_flower, hw_priv);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Could not initialize flower ctrl vNIC");
+		PMD_INIT_LOG(ERR, "Could not initialize flower ctrl vNIC.");
 		goto ctrl_cpp_area_cleanup;
 	}
 
 	/* Start the ctrl vNIC */
 	ret = nfp_flower_start_ctrl_vnic(app_fw_flower);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Could not start flower ctrl vNIC");
+		PMD_INIT_LOG(ERR, "Could not start flower ctrl vNIC.");
 		goto ctrl_vnic_cleanup;
 	}
 
 	/* Start up flower services */
 	ret = nfp_flower_service_start(hw_priv);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Could not enable flower services");
+		PMD_INIT_LOG(ERR, "Could not enable flower services.");
 		ret = -ESRCH;
 		goto ctrl_vnic_cleanup;
 	}
 
 	ret = nfp_flower_repr_create(app_fw_flower, hw_priv);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Could not create representor ports");
+		PMD_INIT_LOG(ERR, "Could not create representor ports.");
 		goto ctrl_vnic_service_stop;
 	}
 
@@ -807,7 +807,7 @@ nfp_uninit_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
 	nfp_mtr_priv_uninit(pf_dev);
 	nfp_flow_priv_uninit(pf_dev);
 	if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0)
-		PMD_DRV_LOG(WARNING, "Failed to free switch domain for device");
+		PMD_DRV_LOG(WARNING, "Failed to free switch domain for device.");
 	rte_free(app_fw_flower);
 }
 
@@ -833,12 +833,12 @@ nfp_secondary_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
 	pci_name = strchr(hw_priv->pf_dev->pci_dev->name, ':') + 1;
 	snprintf(port_name, RTE_ETH_NAME_MAX_LEN, "%s_repr_pf", pci_name);
 
-	PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
+	PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s.", port_name);
 
 	ret = rte_eth_dev_create(&hw_priv->pf_dev->pci_dev->device, port_name, 0, NULL,
 			NULL, nfp_secondary_flower_init, hw_priv);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
+		PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed.", port_name);
 		return -ENODEV;
 	}
 
diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.c b/drivers/net/nfp/flower/nfp_flower_cmsg.c
index 41dd9e6e3e..92bb927196 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.c
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.c
@@ -29,7 +29,7 @@ nfp_flower_cmsg_init(struct nfp_app_fw_flower *app_fw_flower,
 	struct nfp_flower_cmsg_hdr *hdr;
 
 	pkt = rte_pktmbuf_mtod(m, char *);
-	PMD_DRV_LOG(DEBUG, "The flower_cmsg_init using pkt at %p", pkt);
+	PMD_DRV_LOG(DEBUG, "The flower_cmsg_init using pkt at %p.", pkt);
 
 	new_size += nfp_flower_pkt_add_metadata(app_fw_flower, m, NFP_NET_META_PORT_ID_CTRL);
 
@@ -94,7 +94,7 @@ nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower,
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(ERR, "Could not allocate mac repr cmsg");
+		PMD_DRV_LOG(ERR, "Could not allocate mac repr cmsg.");
 		return -ENOMEM;
 	}
 
@@ -131,7 +131,7 @@ nfp_flower_cmsg_repr_reify(struct nfp_app_fw_flower *app_fw_flower,
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Alloc mbuf for repr reify failed");
+		PMD_DRV_LOG(DEBUG, "Alloc mbuf for repr reify failed.");
 		return -ENOMEM;
 	}
 
@@ -161,7 +161,7 @@ nfp_flower_cmsg_port_mod(struct nfp_app_fw_flower *app_fw_flower,
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Alloc mbuf for repr portmod failed");
+		PMD_DRV_LOG(DEBUG, "Alloc mbuf for repr portmod failed.");
 		return -ENOMEM;
 	}
 
@@ -263,7 +263,7 @@ nfp_flower_cmsg_tun_neigh_v4_rule(struct nfp_app_fw_flower *app_fw_flower,
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v4 tun neigh");
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v4 tun neigh.");
 		return -ENOMEM;
 	}
 
@@ -295,7 +295,7 @@ nfp_flower_cmsg_tun_neigh_v6_rule(struct nfp_app_fw_flower *app_fw_flower,
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v6 tun neigh");
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v6 tun neigh.");
 		return -ENOMEM;
 	}
 
@@ -328,7 +328,7 @@ nfp_flower_cmsg_tun_off_v4(struct nfp_app_fw_flower *app_fw_flower)
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v4 tun addr");
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v4 tun addr.");
 		return -ENOMEM;
 	}
 
@@ -371,7 +371,7 @@ nfp_flower_cmsg_tun_off_v6(struct nfp_app_fw_flower *app_fw_flower)
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v6 tun addr");
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v6 tun addr.");
 		return -ENOMEM;
 	}
 
@@ -415,7 +415,7 @@ nfp_flower_cmsg_pre_tunnel_rule(struct nfp_app_fw_flower *app_fw_flower,
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for pre tunnel rule");
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for pre tunnel rule.");
 		return -ENOMEM;
 	}
 
@@ -457,7 +457,7 @@ nfp_flower_cmsg_tun_mac_rule(struct nfp_app_fw_flower *app_fw_flower,
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for tunnel mac");
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for tunnel mac.");
 		return -ENOMEM;
 	}
 
@@ -491,7 +491,7 @@ nfp_flower_cmsg_qos_add(struct nfp_app_fw_flower *app_fw_flower,
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos add");
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos add.");
 		return -ENOMEM;
 	}
 
@@ -521,7 +521,7 @@ nfp_flower_cmsg_qos_delete(struct nfp_app_fw_flower *app_fw_flower,
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos delete");
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos delete.");
 		return -ENOMEM;
 	}
 
@@ -551,7 +551,7 @@ nfp_flower_cmsg_qos_stats(struct nfp_app_fw_flower *app_fw_flower,
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
-		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos stats");
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos stats.");
 		return -ENOMEM;
 	}
 
diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c b/drivers/net/nfp/flower/nfp_flower_ctrl.c
index a44663765b..3bf8b1f399 100644
--- a/drivers/net/nfp/flower/nfp_flower_ctrl.c
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c
@@ -38,7 +38,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
 		 * DPDK just checks the queue is lower than max queues
 		 * enabled. But the queue needs to be configured.
 		 */
-		PMD_RX_LOG(ERR, "RX Bad queue");
+		PMD_RX_LOG(ERR, "RX Bad queue.");
 		return 0;
 	}
 
@@ -66,7 +66,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
 		 */
 		new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
 		if (unlikely(new_mb == NULL)) {
-			PMD_RX_LOG(ERR, "RX mbuf alloc failed port_id=%u queue_id=%hu",
+			PMD_RX_LOG(ERR, "RX mbuf alloc failed port_id=%u queue_id=%hu.",
 					rxq->port_id, rxq->qidx);
 			nfp_net_mbuf_alloc_failed(rxq);
 			break;
@@ -133,7 +133,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
 	 */
 	rte_wmb();
 	if (nb_hold >= rxq->rx_free_thresh) {
-		PMD_RX_LOG(DEBUG, "The port=%hu queue=%hu nb_hold=%hu avail=%hu",
+		PMD_RX_LOG(DEBUG, "The port=%hu queue=%hu nb_hold=%hu avail=%hu.",
 				rxq->port_id, rxq->qidx, nb_hold, avail);
 		nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
 		nb_hold = 0;
@@ -165,7 +165,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower *app_fw_flower,
 		 * DPDK just checks the queue is lower than max queues
 		 * enabled. But the queue needs to be configured.
 		 */
-		PMD_TX_LOG(ERR, "Ctrl dev TX Bad queue");
+		PMD_TX_LOG(ERR, "Ctrl dev TX Bad queue.");
 		goto xmit_end;
 	}
 
@@ -180,7 +180,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower *app_fw_flower,
 
 	free_descs = nfp_net_nfd3_free_tx_desc(txq);
 	if (unlikely(free_descs == 0)) {
-		PMD_TX_LOG(ERR, "Ctrl dev no free descs");
+		PMD_TX_LOG(ERR, "Ctrl dev no free descs.");
 		goto xmit_end;
 	}
 
@@ -236,7 +236,7 @@ nfp_flower_ctrl_vnic_nfdk_xmit(struct nfp_app_fw_flower *app_fw_flower,
 	txq = ctrl_dev->data->tx_queues[0];
 
 	if (unlikely(mbuf->nb_segs > 1)) {
-		PMD_TX_LOG(ERR, "Multisegment packet not supported");
+		PMD_TX_LOG(ERR, "Multisegment packet not supported.");
 		return 0;
 	}
 
@@ -246,7 +246,7 @@ nfp_flower_ctrl_vnic_nfdk_xmit(struct nfp_app_fw_flower *app_fw_flower,
 
 	free_descs = nfp_net_nfdk_free_tx_desc(txq);
 	if (unlikely(free_descs < NFDK_TX_DESC_PER_SIMPLE_PKT)) {
-		PMD_TX_LOG(ERR, "Ctrl dev no free descs");
+		PMD_TX_LOG(ERR, "Ctrl dev no free descs.");
 		return 0;
 	}
 
@@ -323,7 +323,7 @@ nfp_flower_ctrl_vnic_nfdk_xmit(struct nfp_app_fw_flower *app_fw_flower,
 	used_descs = ktxds - txq->ktxds - txq->wr_p;
 	if (RTE_ALIGN_FLOOR(txq->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
 			RTE_ALIGN_FLOOR(txq->wr_p + used_descs - 1, NFDK_TX_DESC_BLOCK_CNT)) {
-		PMD_TX_LOG(INFO, "Used descs cross block boundary");
+		PMD_TX_LOG(INFO, "Used descs cross block boundary.");
 		return 0;
 	}
 
@@ -442,12 +442,12 @@ nfp_flower_cmsg_port_mod_rx(struct nfp_net_hw_priv *hw_priv,
 			repr = app_fw_flower->pf_repr;
 		break;
 	default:
-		PMD_DRV_LOG(ERR, "Ctrl msg for unknown port %#x", port);
+		PMD_DRV_LOG(ERR, "Ctrl msg for unknown port %#x.", port);
 		return -EINVAL;
 	}
 
 	if (repr == NULL) {
-		PMD_DRV_LOG(ERR, "Can not get 'repr' for port %#x", port);
+		PMD_DRV_LOG(ERR, "Can not get 'repr' for port %#x.", port);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/nfp/flower/nfp_flower_flow.c b/drivers/net/nfp/flower/nfp_flower_flow.c
index e65b47e007..abe67be5ae 100644
--- a/drivers/net/nfp/flower/nfp_flower_flow.c
+++ b/drivers/net/nfp/flower/nfp_flower_flow.c
@@ -316,7 +316,7 @@ nfp_mask_table_add(struct nfp_app_fw_flower *app_fw_flower,
 	mask_entry->mask_id  = mask_id;
 	mask_entry->hash_key = hash_key;
 	mask_entry->ref_cnt  = 1;
-	PMD_DRV_LOG(DEBUG, "The hash_key=%#x id=%u ref=%u", hash_key,
+	PMD_DRV_LOG(DEBUG, "The hash_key=%#x id=%u ref=%u.", hash_key,
 			mask_id, mask_entry->ref_cnt);
 
 	ret = rte_hash_add_key_data(priv->mask_table, &hash_key, mask_entry);
@@ -1058,7 +1058,7 @@ nfp_flow_key_layers_check_items(const struct rte_flow_item items[],
 
 	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
 		if (item->type >= RTE_DIM(check_item_fns)) {
-			PMD_DRV_LOG(ERR, "Flow item %d unsupported", item->type);
+			PMD_DRV_LOG(ERR, "Flow item %d unsupported.", item->type);
 			return -ERANGE;
 		}
 
@@ -1068,7 +1068,7 @@ nfp_flow_key_layers_check_items(const struct rte_flow_item items[],
 		param->item = item;
 		ret = check_item_fns[item->type](param);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "Flow item %d check fail", item->type);
+			PMD_DRV_LOG(ERR, "Flow item %d check fail.", item->type);
 			return ret;
 		}
 
@@ -1264,7 +1264,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
 
 	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
 		if (item->type >= RTE_DIM(item_fns) || item_fns[item->type] == NULL) {
-			PMD_DRV_LOG(ERR, "Flow item %d unsupported", item->type);
+			PMD_DRV_LOG(ERR, "Flow item %d unsupported.", item->type);
 			return -ERANGE;
 		}
 
@@ -1432,22 +1432,22 @@ nfp_flow_is_validate_field_data(const struct rte_flow_field_data *data,
 		uint32_t data_width)
 {
 	if (data->level != 0) {
-		PMD_DRV_LOG(ERR, "The 'level' is not support");
+		PMD_DRV_LOG(ERR, "The 'level' is not supported.");
 		return false;
 	}
 
 	if (data->tag_index != 0) {
-		PMD_DRV_LOG(ERR, "The 'tag_index' is not support");
+		PMD_DRV_LOG(ERR, "The 'tag_index' is not supported.");
 		return false;
 	}
 
 	if (data->class_id != 0) {
-		PMD_DRV_LOG(ERR, "The 'class_id' is not support");
+		PMD_DRV_LOG(ERR, "The 'class_id' is not supported.");
 		return false;
 	}
 
 	if (data->offset + conf_width > data_width) {
-		PMD_DRV_LOG(ERR, "The 'offset' value is too big");
+		PMD_DRV_LOG(ERR, "The 'offset' value is too big.");
 		return false;
 	}
 
@@ -1472,25 +1472,25 @@ nfp_flow_action_check_modify(struct nfp_action_calculate_param *param)
 	src_data = &conf->src;
 	if (!nfp_flow_field_id_dst_support(dst_data->field) ||
 			!nfp_flow_field_id_src_support(src_data->field)) {
-		PMD_DRV_LOG(ERR, "Not supported field id");
+		PMD_DRV_LOG(ERR, "Not supported field id.");
 		return -EINVAL;
 	}
 
 	width = conf->width;
 	if (width == 0) {
-		PMD_DRV_LOG(ERR, "No bits are required to modify");
+		PMD_DRV_LOG(ERR, "No bits are required to modify.");
 		return -EINVAL;
 	}
 
 	dst_width = nfp_flow_field_width(dst_data->field, 0);
 	src_width = nfp_flow_field_width(src_data->field, dst_width);
 	if (width > dst_width || width > src_width) {
-		PMD_DRV_LOG(ERR, "Can not modify more bits than the width of a field");
+		PMD_DRV_LOG(ERR, "Can not modify more bits than the width of a field.");
 		return -EINVAL;
 	}
 
 	if (!nfp_flow_is_validate_field_data(dst_data, width, dst_width)) {
-		PMD_DRV_LOG(ERR, "The dest field data has problem");
+		PMD_DRV_LOG(ERR, "The dest field data has a problem.");
 		return -EINVAL;
 	}
 
@@ -1505,14 +1505,14 @@ nfp_flow_action_check_queue(struct nfp_action_calculate_param *param)
 
 	repr = param->dev->data->dev_private;
 	if (!nfp_flow_support_partial(repr)) {
-		PMD_DRV_LOG(ERR, "Queue action not supported");
+		PMD_DRV_LOG(ERR, "Queue action not supported.");
 		return -ENOTSUP;
 	}
 
 	queue = param->action->conf;
 	if (queue->index >= param->dev->data->nb_rx_queues ||
 			param->dev->data->rx_queues[queue->index] == NULL) {
-		PMD_DRV_LOG(ERR, "Queue index is illegal");
+		PMD_DRV_LOG(ERR, "Queue index is illegal.");
 		return -EINVAL;
 	}
 
@@ -1541,7 +1541,7 @@ nfp_flow_key_layers_check_actions(struct rte_eth_dev *dev,
 
 	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
 		if (action->type >= RTE_DIM(check_action_fns)) {
-			PMD_DRV_LOG(ERR, "Flow action %d unsupported", action->type);
+			PMD_DRV_LOG(ERR, "Flow action %d unsupported.", action->type);
 			return -ERANGE;
 		}
 
@@ -1551,7 +1551,7 @@ nfp_flow_key_layers_check_actions(struct rte_eth_dev *dev,
 		param.action = action;
 		ret = check_action_fns[action->type](&param);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "Flow action %d calculate fail", action->type);
+			PMD_DRV_LOG(ERR, "Flow action %d calculate fail.", action->type);
 			return ret;
 		}
 	}
@@ -1790,7 +1790,7 @@ nfp_flow_key_layers_calculate_actions(struct rte_eth_dev *dev,
 		}
 
 		if (action->type >= RTE_DIM(action_fns) || action_fns[action->type] == NULL) {
-			PMD_DRV_LOG(ERR, "Flow action %d unsupported", action->type);
+			PMD_DRV_LOG(ERR, "Flow action %d unsupported.", action->type);
 			return -ERANGE;
 		}
 
@@ -1800,7 +1800,7 @@ nfp_flow_key_layers_calculate_actions(struct rte_eth_dev *dev,
 
 	if (param.flag->partial_both_flag &&
 			key_ls->act_size != sizeof(struct nfp_fl_act_partial)) {
-		PMD_DRV_LOG(ERR, "Mark and Queue can not be offloaded with other actions");
+		PMD_DRV_LOG(ERR, "Mark and Queue can not be offloaded with other actions.");
 		return -ENOTSUP;
 	}
 
@@ -1831,26 +1831,26 @@ nfp_flow_key_layers_calculate(struct rte_eth_dev *dev,
 
 	ret = nfp_flow_key_layers_check_items(items, &param);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Flow items check failed");
+		PMD_DRV_LOG(ERR, "Flow items check failed.");
 		return ret;
 	}
 
 	memset(param.flag, 0, sizeof(struct nfp_item_flag));
 	ret = nfp_flow_key_layers_calculate_items(items, &param);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Flow items calculate failed");
+		PMD_DRV_LOG(ERR, "Flow items calculate failed.");
 		return ret;
 	}
 
 	ret = nfp_flow_key_layers_check_actions(dev, actions);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Flow actions check failed");
+		PMD_DRV_LOG(ERR, "Flow actions check failed.");
 		return ret;
 	}
 
 	ret = nfp_flow_key_layers_calculate_actions(dev, actions, key_ls);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Flow actions check failed");
+		PMD_DRV_LOG(ERR, "Flow actions calculate failed.");
 		return ret;
 	}
 
@@ -2766,7 +2766,7 @@ nfp_flow_compile_item_proc(struct nfp_flower_representor *repr,
 		}
 
 		if (proc == NULL) {
-			PMD_DRV_LOG(ERR, "No next item provided for %d", item->type);
+			PMD_DRV_LOG(ERR, "No next item provided for %d.", item->type);
 			ret = -ENOTSUP;
 			break;
 		}
@@ -2774,13 +2774,13 @@ nfp_flow_compile_item_proc(struct nfp_flower_representor *repr,
 		/* Perform basic sanity checks */
 		ret = nfp_flow_item_check(item, proc);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "NFP flow item %d check failed", item->type);
+			PMD_DRV_LOG(ERR, "NFP flow item %d check failed.", item->type);
 			ret = -EINVAL;
 			break;
 		}
 
 		if (proc->merge == NULL) {
-			PMD_DRV_LOG(ERR, "NFP flow item %d no proc function", item->type);
+			PMD_DRV_LOG(ERR, "NFP flow item %d has no proc function.", item->type);
 			ret = -ENOTSUP;
 			break;
 		}
@@ -2798,7 +2798,7 @@ nfp_flow_compile_item_proc(struct nfp_flower_representor *repr,
 		param.is_mask = false;
 		ret = proc->merge(&param);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "NFP flow item %d exact merge failed", item->type);
+			PMD_DRV_LOG(ERR, "NFP flow item %d exact merge failed.", item->type);
 			break;
 		}
 
@@ -2807,7 +2807,7 @@ nfp_flow_compile_item_proc(struct nfp_flower_representor *repr,
 		param.is_mask = true;
 		ret = proc->merge(&param);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "NFP flow item %d mask merge failed", item->type);
+			PMD_DRV_LOG(ERR, "NFP flow item %d mask merge failed.", item->type);
 			break;
 		}
 
@@ -3499,7 +3499,7 @@ nfp_flower_del_tun_neigh(struct nfp_app_fw_flower *app_fw_flower,
 	}
 
 	if (!flag) {
-		PMD_DRV_LOG(DEBUG, "Can not find nn entry in the nn list");
+		PMD_DRV_LOG(DEBUG, "Can not find nn entry in the nn list.");
 		return -EINVAL;
 	}
 
@@ -3544,7 +3544,7 @@ nfp_flower_del_tun_neigh(struct nfp_app_fw_flower *app_fw_flower,
 	}
 
 	if (ret != 0) {
-		PMD_DRV_LOG(DEBUG, "Failed to send the nn entry");
+		PMD_DRV_LOG(DEBUG, "Failed to send the nn entry.");
 		return -EINVAL;
 	}
 
@@ -3680,7 +3680,7 @@ nfp_pre_tun_table_search(struct nfp_flow_priv *priv,
 	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
 	index = rte_hash_lookup_data(priv->pre_tun_table, &hash_key, (void **)&mac_index);
 	if (index < 0) {
-		PMD_DRV_LOG(DEBUG, "Data NOT found in the hash table");
+		PMD_DRV_LOG(DEBUG, "Data NOT found in the hash table.");
 		return NULL;
 	}
 
@@ -3698,7 +3698,7 @@ nfp_pre_tun_table_add(struct nfp_flow_priv *priv,
 	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
 	ret = rte_hash_add_key_data(priv->pre_tun_table, &hash_key, hash_data);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Add to pre tunnel table failed");
+		PMD_DRV_LOG(ERR, "Add to pre tunnel table failed.");
 		return false;
 	}
 
@@ -3716,7 +3716,7 @@ nfp_pre_tun_table_delete(struct nfp_flow_priv *priv,
 	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
 	ret = rte_hash_del_key(priv->pre_tun_table, &hash_key);
 	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "Delete from pre tunnel table failed");
+		PMD_DRV_LOG(ERR, "Delete from pre tunnel table failed.");
 		return false;
 	}
 
@@ -3736,14 +3736,14 @@ nfp_pre_tun_table_check_add(struct nfp_flower_representor *repr,
 
 	priv = repr->app_fw_flower->flow_priv;
 	if (priv->pre_tun_cnt >= NFP_TUN_PRE_TUN_RULE_LIMIT) {
-		PMD_DRV_LOG(ERR, "Pre tunnel table has full");
+		PMD_DRV_LOG(ERR, "Pre tunnel table is full.");
 		return -EINVAL;
 	}
 
 	entry_size = sizeof(struct nfp_pre_tun_entry);
 	entry = rte_zmalloc("nfp_pre_tun", entry_size, 0);
 	if (entry == NULL) {
-		PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table");
+		PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table.");
 		return -ENOMEM;
 	}
 
@@ -3805,7 +3805,7 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr,
 	entry_size = sizeof(struct nfp_pre_tun_entry);
 	entry = rte_zmalloc("nfp_pre_tun", entry_size, 0);
 	if (entry == NULL) {
-		PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table");
+		PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table.");
 		return -ENOMEM;
 	}
 
@@ -3839,7 +3839,7 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr,
 	ret = nfp_flower_cmsg_tun_mac_rule(repr->app_fw_flower, &repr->mac_addr,
 			nfp_mac_idx, true);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Send tunnel mac rule failed");
+		PMD_DRV_LOG(ERR, "Send tunnel mac rule failed.");
 		ret = -EINVAL;
 		goto free_entry;
 	}
@@ -3848,7 +3848,7 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr,
 		ret = nfp_flower_cmsg_pre_tunnel_rule(repr->app_fw_flower, nfp_flow_meta,
 				nfp_mac_idx, true);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "Send pre tunnel rule failed");
+			PMD_DRV_LOG(ERR, "Send pre tunnel rule failed.");
 			ret = -EINVAL;
 			goto free_entry;
 		}
@@ -3856,7 +3856,7 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr,
 
 	find_entry->ref_cnt = 1U;
 	if (!nfp_pre_tun_table_delete(priv, (char *)find_entry, entry_size)) {
-		PMD_DRV_LOG(ERR, "Delete entry from pre tunnel table failed");
+		PMD_DRV_LOG(ERR, "Delete entry from pre tunnel table failed.");
 		ret = -EINVAL;
 		goto free_entry;
 	}
@@ -3883,7 +3883,7 @@ nfp_flow_action_tunnel_decap(struct nfp_flower_representor *repr,
 
 	ret = nfp_pre_tun_table_check_add(repr, &nfp_mac_idx);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Pre tunnel table add failed");
+		PMD_DRV_LOG(ERR, "Pre tunnel table add failed.");
 		return -EINVAL;
 	}
 
@@ -3897,7 +3897,7 @@ nfp_flow_action_tunnel_decap(struct nfp_flower_representor *repr,
 	ret = nfp_flower_cmsg_tun_mac_rule(app_fw_flower, &repr->mac_addr,
 			nfp_mac_idx, false);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Send tunnel mac rule failed");
+		PMD_DRV_LOG(ERR, "Send tunnel mac rule failed.");
 		return -EINVAL;
 	}
 
@@ -3905,7 +3905,7 @@ nfp_flow_action_tunnel_decap(struct nfp_flower_representor *repr,
 		ret = nfp_flower_cmsg_pre_tunnel_rule(app_fw_flower, nfp_flow_meta,
 				nfp_mac_idx, false);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "Send pre tunnel rule failed");
+			PMD_DRV_LOG(ERR, "Send pre tunnel rule failed.");
 			return -EINVAL;
 		}
 	}
@@ -4146,17 +4146,17 @@ nfp_flow_action_meter(struct nfp_flower_representor *representor,
 
 	mtr = nfp_mtr_find_by_mtr_id(app_fw_flower->mtr_priv, meter->mtr_id);
 	if (mtr == NULL) {
-		PMD_DRV_LOG(ERR, "Meter id not exist");
+		PMD_DRV_LOG(ERR, "Meter id does not exist.");
 		return -EINVAL;
 	}
 
 	if (!mtr->enable) {
-		PMD_DRV_LOG(ERR, "Requested meter disable");
+		PMD_DRV_LOG(ERR, "Requested meter is disabled.");
 		return -EINVAL;
 	}
 
 	if (!mtr->shared && mtr->ref_cnt > 0) {
-		PMD_DRV_LOG(ERR, "Can not use a used unshared meter");
+		PMD_DRV_LOG(ERR, "Can not use an unshared meter that is already in use.");
 		return -EINVAL;
 	}
 
@@ -4351,7 +4351,7 @@ nfp_flow_action_compile_output(struct nfp_action_compile_param *param)
 	ret = nfp_flow_action_output(param->position, param->action,
 			param->nfp_flow_meta, output_cnt);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Failed process output action");
+		PMD_DRV_LOG(ERR, "Failed to process output action.");
 		return ret;
 	}
 
@@ -4402,7 +4402,7 @@ nfp_flow_action_compile_push_vlan(struct nfp_action_compile_param *param)
 
 	ret = nfp_flow_action_push_vlan(param->position, param->action);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN");
+		PMD_DRV_LOG(ERR, "Failed to process RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN.");
 		return ret;
 	}
 
@@ -4541,7 +4541,7 @@ nfp_flow_action_compile_vxlan_encap(struct nfp_action_compile_param *param)
 			param->position, param->action_data, param->action,
 			param->nfp_flow_meta, &param->nfp_flow->tun);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP");
+		PMD_DRV_LOG(ERR, "Failed to process RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP.");
 		return ret;
 	}
 
@@ -4561,7 +4561,7 @@ nfp_flow_action_compile_raw_encap(struct nfp_action_compile_param *param)
 			param->position, param->action_data, param->action,
 			param->nfp_flow_meta, &param->nfp_flow->tun);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_RAW_ENCAP");
+		PMD_DRV_LOG(ERR, "Failed to process RTE_FLOW_ACTION_TYPE_RAW_ENCAP.");
 		return ret;
 	}
 
@@ -4580,7 +4580,7 @@ nfp_flow_action_compile_tnl_decap(struct nfp_action_compile_param *param)
 	ret = nfp_flow_action_tunnel_decap(param->repr, param->action,
 			param->nfp_flow_meta, param->nfp_flow);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Failed process tunnel decap");
+		PMD_DRV_LOG(ERR, "Failed to process tunnel decap.");
 		return ret;
 	}
 
@@ -4600,7 +4600,7 @@ nfp_flow_action_compile_meter(struct nfp_action_compile_param *param)
 	ret = nfp_flow_action_meter(param->repr, param->action,
 			param->position, &param->nfp_flow->mtr_id);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_METER");
+		PMD_DRV_LOG(ERR, "Failed to process RTE_FLOW_ACTION_TYPE_METER.");
 		return -EINVAL;
 	}
 
@@ -4685,7 +4685,7 @@ nfp_flow_action_compile_rss(struct nfp_action_compile_param *param)
 	ret = nfp_flow_action_rss_add(param->repr, param->action,
 			&param->nfp_flow->rss);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_RSS");
+		PMD_DRV_LOG(ERR, "Failed to process RTE_FLOW_ACTION_TYPE_RSS.");
 		return ret;
 	}
 
@@ -4749,7 +4749,7 @@ nfp_flow_action_compile_modify(struct nfp_action_compile_param *param)
 	} else if (conf->src.field == RTE_FLOW_FIELD_VALUE) {
 		action.conf = (void *)(uintptr_t)&conf->src.value;
 	} else {
-		PMD_DRV_LOG(ERR, "The SRC field of flow modify is not right");
+		PMD_DRV_LOG(ERR, "The SRC field of flow modify is invalid.");
 		return -EINVAL;
 	}
 
@@ -4759,7 +4759,7 @@ nfp_flow_action_compile_modify(struct nfp_action_compile_param *param)
 	param->action = &action;
 	ret = nfp_flow_action_compile_modify_dispatch(param, conf->dst.field);
 	if (ret != 0)
-		PMD_DRV_LOG(ERR, "Something wrong when modify dispatch");
+		PMD_DRV_LOG(ERR, "Something went wrong in modify dispatch.");
 
 	/* Reload the old action pointer */
 	param->action = action_old;
@@ -4827,14 +4827,14 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor,
 	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
 		if (action->type >= RTE_DIM(action_compile_fns) ||
 				action_compile_fns[action->type] == NULL) {
-			PMD_DRV_LOG(ERR, "Flow action %d unsupported", action->type);
+			PMD_DRV_LOG(ERR, "Flow action %d unsupported.", action->type);
 			return -ERANGE;
 		}
 
 		param.action = action;
 		ret = action_compile_fns[action->type](&param);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "Flow action %d compile fail", action->type);
+			PMD_DRV_LOG(ERR, "Flow action %d compilation failed.", action->type);
 			return ret;
 		}
 
@@ -4842,7 +4842,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor,
 	}
 
 	if (nfp_flow->install_flag && total_actions == 0) {
-		PMD_DRV_LOG(ERR, "The action list is empty");
+		PMD_DRV_LOG(ERR, "The action list is empty.");
 		return -ENOTSUP;
 	}
 
@@ -5512,20 +5512,20 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 	ctx_count = nfp_rtsym_read_le(pf_dev->sym_tbl,
 			"CONFIG_FC_HOST_CTX_COUNT", &ret);
 	if (ret < 0) {
-		PMD_INIT_LOG(ERR, "Read CTX_COUNT from symbol table failed");
+		PMD_INIT_LOG(ERR, "Read CTX_COUNT from symbol table failed.");
 		goto exit;
 	}
 
 	ctx_split = nfp_rtsym_read_le(pf_dev->sym_tbl,
 			"CONFIG_FC_HOST_CTX_SPLIT", &ret);
 	if (ret < 0) {
-		PMD_INIT_LOG(ERR, "Read CTX_SPLIT from symbol table failed");
+		PMD_INIT_LOG(ERR, "Read CTX_SPLIT from symbol table failed.");
 		goto exit;
 	}
 
 	priv = rte_zmalloc("nfp_app_flow_priv", sizeof(struct nfp_flow_priv), 0);
 	if (priv == NULL) {
-		PMD_INIT_LOG(ERR, "NFP app flow priv creation failed");
+		PMD_INIT_LOG(ERR, "NFP app flow priv creation failed.");
 		ret = -ENOMEM;
 		goto exit;
 	}
@@ -5543,7 +5543,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 		priv->mask_ids.free_list.buf = rte_zmalloc("nfp_app_mask_ids",
 				NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS, 0);
 		if (priv->mask_ids.free_list.buf == NULL) {
-			PMD_INIT_LOG(ERR, "Mask id free list creation failed");
+			PMD_INIT_LOG(ERR, "Mask id free list creation failed.");
 			ret = -ENOMEM;
 			goto free_priv;
 		}
@@ -5553,7 +5553,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 		priv->stats_ids.free_list.buf = rte_zmalloc("nfp_app_stats_ids",
 				priv->stats_ring_size * NFP_FL_STATS_ELEM_RS, 0);
 		if (priv->stats_ids.free_list.buf == NULL) {
-			PMD_INIT_LOG(ERR, "Stats id free list creation failed");
+			PMD_INIT_LOG(ERR, "Stats id free list creation failed.");
 			ret = -ENOMEM;
 			goto free_mask_id;
 		}
@@ -5563,12 +5563,12 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 	rte_spinlock_init(&priv->stats_lock);
 	stats_size = (ctx_count & NFP_FL_STAT_ID_STAT) |
 			((ctx_split - 1) & NFP_FL_STAT_ID_MU_NUM);
-	PMD_INIT_LOG(INFO, "The ctx_count:%0lx, ctx_split:%0lx, stats_size:%0lx ",
+	PMD_INIT_LOG(INFO, "The ctx_count:%0lx, ctx_split:%0lx, stats_size:%0lx.",
 			ctx_count, ctx_split, stats_size);
 	priv->stats = rte_zmalloc("nfp_flow_stats",
 			stats_size * sizeof(struct nfp_fl_stats), 0);
 	if (priv->stats == NULL) {
-		PMD_INIT_LOG(ERR, "Flow stats creation failed");
+		PMD_INIT_LOG(ERR, "Flow stats creation failed.");
 		ret = -ENOMEM;
 		goto free_stats_id;
 	}
@@ -5577,7 +5577,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 	mask_hash_params.hash_func_init_val = priv->hash_seed;
 	priv->mask_table = rte_hash_create(&mask_hash_params);
 	if (priv->mask_table == NULL) {
-		PMD_INIT_LOG(ERR, "Mask hash table creation failed");
+		PMD_INIT_LOG(ERR, "Mask hash table creation failed.");
 		ret = -ENOMEM;
 		goto free_stats;
 	}
@@ -5587,7 +5587,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 	flow_hash_params.entries = ctx_count;
 	priv->flow_table = rte_hash_create(&flow_hash_params);
 	if (priv->flow_table == NULL) {
-		PMD_INIT_LOG(ERR, "Flow hash table creation failed");
+		PMD_INIT_LOG(ERR, "Flow hash table creation failed.");
 		ret = -ENOMEM;
 		goto free_mask_table;
 	}
@@ -5597,7 +5597,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 	pre_tun_hash_params.hash_func_init_val = priv->hash_seed;
 	priv->pre_tun_table = rte_hash_create(&pre_tun_hash_params);
 	if (priv->pre_tun_table == NULL) {
-		PMD_INIT_LOG(ERR, "Pre tunnel table creation failed");
+		PMD_INIT_LOG(ERR, "Pre tunnel table creation failed.");
 		ret = -ENOMEM;
 		goto free_flow_table;
 	}
@@ -5606,7 +5606,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 	ct_zone_hash_params.hash_func_init_val = priv->hash_seed;
 	priv->ct_zone_table = rte_hash_create(&ct_zone_hash_params);
 	if (priv->ct_zone_table == NULL) {
-		PMD_INIT_LOG(ERR, "CT zone table creation failed");
+		PMD_INIT_LOG(ERR, "CT zone table creation failed.");
 		ret = -ENOMEM;
 		goto free_pre_tnl_table;
 	}
@@ -5616,7 +5616,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 	ct_map_hash_params.entries = ctx_count;
 	priv->ct_map_table = rte_hash_create(&ct_map_hash_params);
 	if (priv->ct_map_table == NULL) {
-		PMD_INIT_LOG(ERR, "CT map table creation failed");
+		PMD_INIT_LOG(ERR, "CT map table creation failed.");
 		ret = -ENOMEM;
 		goto free_ct_zone_table;
 	}
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index 73d148ec95..fb0741c294 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -167,7 +167,7 @@ nfp_flower_repr_rx_queue_setup(struct rte_eth_dev *dev,
 	repr->ring[rx_queue_id] = rte_ring_create(ring_name, nb_rx_desc,
 			rte_socket_id(), 0);
 	if (repr->ring[rx_queue_id] == NULL) {
-		PMD_DRV_LOG(ERR, "The rte_ring_create failed for rx queue %u", rx_queue_id);
+		PMD_DRV_LOG(ERR, "The rte_ring_create failed for rx queue %u.", rx_queue_id);
 		rte_free(rxq);
 		return -ENOMEM;
 	}
@@ -259,7 +259,7 @@ nfp_flower_repr_rx_burst(void *rx_queue,
 
 	rxq = rx_queue;
 	if (unlikely(rxq == NULL)) {
-		PMD_RX_LOG(ERR, "RX Bad queue");
+		PMD_RX_LOG(ERR, "Bad RX queue.");
 		return 0;
 	}
 
@@ -275,7 +275,7 @@ nfp_flower_repr_rx_burst(void *rx_queue,
 	total_dequeue = rte_ring_dequeue_burst(repr->ring[rxq->qidx],
 			(void *)rx_pkts, nb_pkts, &available);
 	if (total_dequeue != 0) {
-		PMD_RX_LOG(DEBUG, "Port: %#x, queue: %hu received: %u, available: %u",
+		PMD_RX_LOG(DEBUG, "Port: %#x, queue: %hu received: %u, available: %u.",
 				repr->port_id, rxq->qidx, total_dequeue, available);
 
 		data_len = 0;
@@ -306,7 +306,7 @@ nfp_flower_repr_tx_burst(void *tx_queue,
 
 	txq = tx_queue;
 	if (unlikely(txq == NULL)) {
-		PMD_TX_LOG(ERR, "TX Bad queue");
+		PMD_TX_LOG(ERR, "Bad TX queue.");
 		return 0;
 	}
 
@@ -324,7 +324,7 @@ nfp_flower_repr_tx_burst(void *tx_queue,
 	pf_tx_queue = dev->data->tx_queues[txq->qidx];
 	sent = nfp_flower_pf_xmit_pkts(pf_tx_queue, tx_pkts, nb_pkts);
 	if (sent != 0) {
-		PMD_TX_LOG(DEBUG, "Port: %#x transmitted: %hu queue: %u",
+		PMD_TX_LOG(DEBUG, "Port: %#x transmitted: %hu queue: %u.",
 				repr->port_id, sent, txq->qidx);
 
 		data_len = 0;
@@ -603,7 +603,7 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev,
 	/* Allocating memory for mac addr */
 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to allocate memory for repr MAC");
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for repr MAC.");
 		return -ENOMEM;
 	}
 
@@ -650,7 +650,7 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev,
 			sizeof(struct rte_ring *) * app_fw_flower->pf_hw->max_rx_queues,
 			RTE_CACHE_LINE_SIZE, numa_node);
 	if (repr->ring == NULL) {
-		PMD_DRV_LOG(ERR, "Ring create failed for %s", ring_name);
+		PMD_DRV_LOG(ERR, "Ring create failed for %s.", ring_name);
 		return -ENOMEM;
 	}
 
@@ -683,7 +683,7 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev,
 	/* Allocating memory for mac addr */
 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to allocate memory for repr MAC");
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for repr MAC.");
 		ret = -ENOMEM;
 		goto ring_cleanup;
 	}
@@ -694,7 +694,7 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev,
 	/* Send reify message to hardware to inform it about the new repr */
 	ret = nfp_flower_cmsg_repr_reify(app_fw_flower, repr);
 	if (ret != 0) {
-		PMD_INIT_LOG(WARNING, "Failed to send repr reify message");
+		PMD_INIT_LOG(WARNING, "Failed to send repr reify message.");
 		goto mac_cleanup;
 	}
 
@@ -823,7 +823,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower,
 	/* Send a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware */
 	ret = nfp_flower_cmsg_mac_repr(app_fw_flower, pf_dev);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Cloud not send mac repr cmsgs");
+		PMD_INIT_LOG(ERR, "Could not send mac repr cmsgs.");
 		return ret;
 	}
 
@@ -850,7 +850,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower,
 			sizeof(struct nfp_flower_representor),
 			NULL, NULL, nfp_flower_pf_repr_init, &flower_repr);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Failed to init the pf repr");
+		PMD_INIT_LOG(ERR, "Failed to init the pf repr.");
 		return -EINVAL;
 	}
 
@@ -878,7 +878,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower,
 				sizeof(struct nfp_flower_representor),
 				NULL, NULL, nfp_flower_repr_init, &repr_init);
 		if (ret != 0) {
-			PMD_INIT_LOG(ERR, "Cloud not create eth_dev for repr");
+			PMD_INIT_LOG(ERR, "Could not create eth_dev for repr.");
 			break;
 		}
 	}
@@ -909,7 +909,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower,
 				sizeof(struct nfp_flower_representor),
 				NULL, NULL, nfp_flower_repr_init, &repr_init);
 		if (ret != 0) {
-			PMD_INIT_LOG(ERR, "Cloud not create eth_dev for repr");
+			PMD_INIT_LOG(ERR, "Could not create eth_dev for repr.");
 			break;
 		}
 	}
@@ -944,13 +944,13 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower,
 	/* Allocate a switch domain for the flower app */
 	ret = rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id);
 	if (ret != 0)
-		PMD_INIT_LOG(WARNING, "Failed to allocate switch domain for device");
+		PMD_INIT_LOG(WARNING, "Failed to allocate switch domain for device.");
 
 	/* Now parse PCI device args passed for representor info */
 	if (pci_dev->device.devargs != NULL) {
 		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da, 1);
 		if (ret < 0) {
-			PMD_INIT_LOG(ERR, "Devarg parse failed");
+			PMD_INIT_LOG(ERR, "Devarg parse failed.");
 			return -EINVAL;
 		}
 	}
@@ -968,7 +968,7 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower,
 
 	/* Only support VF representor creation via the command line */
 	if (eth_da.type != RTE_ETH_REPRESENTOR_VF) {
-		PMD_INIT_LOG(ERR, "Unsupported representor type: %d", eth_da.type);
+		PMD_INIT_LOG(ERR, "Unsupported representor type: %d.", eth_da.type);
 		return -ENOTSUP;
 	}
 
@@ -977,17 +977,17 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower,
 	app_fw_flower->num_vf_reprs = eth_da.nb_representor_ports -
 			pf_dev->total_phyports - 1;
 	if (pf_dev->max_vfs != 0 && pf_dev->sriov_vf < app_fw_flower->num_vf_reprs) {
-		PMD_INIT_LOG(ERR, "The VF repr nums %d is bigger than VF nums %d",
+		PMD_INIT_LOG(ERR, "The number of VF reprs %d is bigger than the number of VFs %d.",
 				app_fw_flower->num_vf_reprs, pf_dev->sriov_vf);
 		return -ERANGE;
 	}
 
-	PMD_INIT_LOG(INFO, "%d number of VF reprs", app_fw_flower->num_vf_reprs);
-	PMD_INIT_LOG(INFO, "%d number of phyport reprs", app_fw_flower->num_phyport_reprs);
+	PMD_INIT_LOG(INFO, "Number of VF reprs: %d.", app_fw_flower->num_vf_reprs);
+	PMD_INIT_LOG(INFO, "Number of phyport reprs: %d.", app_fw_flower->num_phyport_reprs);
 
 	ret = nfp_flower_repr_alloc(app_fw_flower, hw_priv);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Representors allocation failed");
+		PMD_INIT_LOG(ERR, "Representors allocation failed.");
 		ret = -EINVAL;
 		goto domain_free;
 	}
@@ -996,7 +996,7 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower,
 
 domain_free:
 	if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0)
-		PMD_INIT_LOG(WARNING, "Failed to free switch domain for device");
+		PMD_INIT_LOG(WARNING, "Failed to free switch domain for device.");
 
 	return ret;
 }
diff --git a/drivers/net/nfp/flower/nfp_flower_service.c b/drivers/net/nfp/flower/nfp_flower_service.c
index aac11dbb94..b4d987a980 100644
--- a/drivers/net/nfp/flower/nfp_flower_service.c
+++ b/drivers/net/nfp/flower/nfp_flower_service.c
@@ -155,7 +155,7 @@ nfp_flower_service_start(struct nfp_net_hw_priv *hw_priv)
 
 	service_handle = nfp_flower_service_handle_get(hw_priv);
 	if (service_handle == NULL) {
-		PMD_DRV_LOG(ERR, "Can not get service handle");
+		PMD_DRV_LOG(ERR, "Can not get service handle.");
 		return -EINVAL;
 	}
 
@@ -175,7 +175,7 @@ nfp_flower_service_start(struct nfp_net_hw_priv *hw_priv)
 	/* Insert the NIC to flower service slot */
 	ret = nfp_flower_service_insert(hw_priv, service_handle);
 	if (ret == MAX_FLOWER_SERVICE_SLOT) {
-		PMD_DRV_LOG(ERR, "Flower ctrl vnic service slot over %u",
+		PMD_DRV_LOG(ERR, "Flower ctrl vnic service slots exceed the limit %u.",
 				MAX_FLOWER_SERVICE_SLOT);
 		return -ENOSPC;
 	}
@@ -192,7 +192,7 @@ nfp_flower_service_stop(struct nfp_net_hw_priv *hw_priv)
 
 	service_handle = nfp_flower_service_handle_get(hw_priv);
 	if (service_handle == NULL) {
-		PMD_DRV_LOG(ERR, "Can not get service handle");
+		PMD_DRV_LOG(ERR, "Can not get service handle.");
 		return;
 	}
 
diff --git a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
index bfcd357774..3ffcbb2576 100644
--- a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
+++ b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
@@ -190,7 +190,7 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data,
 		switch (meta_info & NFP_NET_META_FIELD_MASK) {
 		case NFP_NET_META_VLAN:
 			if (vlan_layer > 0) {
-				PMD_DRV_LOG(ERR, "At most 1 layers of vlan is supported");
+				PMD_DRV_LOG(ERR, "At most 1 layer of vlan is supported.");
 				return -EINVAL;
 			}
 			nfp_net_meta_set_vlan(meta_data, pkt, layer);
@@ -206,7 +206,7 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data,
 			ipsec_layer++;
 			break;
 		default:
-			PMD_DRV_LOG(ERR, "The metadata type not supported");
+			PMD_DRV_LOG(ERR, "The metadata type is not supported.");
 			return -ENOTSUP;
 		}
 
@@ -249,7 +249,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,
 	hw = txq->hw;
 	txds = &txq->txds[txq->wr_p];
 
-	PMD_TX_LOG(DEBUG, "Working for queue %hu at pos %d and %hu packets",
+	PMD_TX_LOG(DEBUG, "Working for queue %hu at pos %d and %hu packets.",
 			txq->qidx, txq->wr_p, nb_pkts);
 
 	if (nfp_net_nfd3_free_tx_desc(txq) < NFD3_TX_DESC_PER_PKT * nb_pkts ||
@@ -263,7 +263,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,
 	pkt = *tx_pkts;
 
 	issued_descs = 0;
-	PMD_TX_LOG(DEBUG, "Queue: %hu. Sending %hu packets", txq->qidx, nb_pkts);
+	PMD_TX_LOG(DEBUG, "Queue: %hu. Sending %hu packets.", txq->qidx, nb_pkts);
 
 	/* Sending packets */
 	for (i = 0; i < nb_pkts && free_descs > 0; i++) {
@@ -288,7 +288,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,
 
 		if (unlikely(pkt->nb_segs > 1 &&
 				(hw->super.ctrl & NFP_NET_CFG_CTRL_GATHER) == 0)) {
-			PMD_TX_LOG(ERR, "Multisegment packet not supported");
+			PMD_TX_LOG(ERR, "Multisegment packet not supported.");
 			goto xmit_end;
 		}
 
@@ -396,7 +396,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
 	tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfd3_tx_desc);
 	if ((NFD3_TX_DESC_PER_PKT * tx_desc_sz) % NFP_ALIGN_RING_DESC != 0 ||
 			nb_desc > max_tx_desc || nb_desc < min_tx_desc) {
-		PMD_DRV_LOG(ERR, "Wrong nb_desc value");
+		PMD_DRV_LOG(ERR, "Wrong nb_desc value.");
 		return -EINVAL;
 	}
 
@@ -414,7 +414,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
 	 * calling nfp_net_stop().
 	 */
 	if (dev->data->tx_queues[queue_idx] != NULL) {
-		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d.",
 				queue_idx);
 		nfp_net_tx_queue_release(dev, queue_idx);
 		dev->data->tx_queues[queue_idx] = NULL;
@@ -424,7 +424,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
 			RTE_CACHE_LINE_SIZE, socket_id);
 	if (txq == NULL) {
-		PMD_DRV_LOG(ERR, "Error allocating tx dma");
+		PMD_DRV_LOG(ERR, "Error allocating tx dma.");
 		return -ENOMEM;
 	}
 
@@ -439,7 +439,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
 	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
 			NFP_MEMZONE_ALIGN, socket_id);
 	if (tz == NULL) {
-		PMD_DRV_LOG(ERR, "Error allocating tx dma");
+		PMD_DRV_LOG(ERR, "Error allocating tx dma.");
 		nfp_net_tx_queue_release(dev, queue_idx);
 		dev->data->tx_queues[queue_idx] = NULL;
 		return -ENOMEM;
diff --git a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
index 1b789e32dc..15867ab62f 100644
--- a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
+++ b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
@@ -195,7 +195,7 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt,
 			ipsec_layer++;
 			break;
 		default:
-			PMD_DRV_LOG(ERR, "The metadata type not supported");
+			PMD_DRV_LOG(ERR, "The metadata type is not supported.");
 			return -ENOTSUP;
 		}
 
@@ -236,7 +236,7 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue,
 	txq = tx_queue;
 	hw = txq->hw;
 
-	PMD_TX_LOG(DEBUG, "Working for queue %hu at pos %d and %hu packets",
+	PMD_TX_LOG(DEBUG, "Working for queue %hu at pos %d and %hu packets.",
 			txq->qidx, txq->wr_p, nb_pkts);
 
 	if (nfp_net_nfdk_free_tx_desc(txq) < NFDK_TX_DESC_PER_SIMPLE_PKT * nb_pkts ||
@@ -247,7 +247,7 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue,
 	if (unlikely(free_descs == 0))
 		return 0;
 
-	PMD_TX_LOG(DEBUG, "Queue: %hu. Sending %hu packets", txq->qidx, nb_pkts);
+	PMD_TX_LOG(DEBUG, "Queue: %hu. Sending %hu packets.", txq->qidx, nb_pkts);
 
 	/* Sending packets */
 	while (npkts < nb_pkts && free_descs > 0) {
@@ -289,7 +289,7 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue,
 
 		if (unlikely(pkt->nb_segs > 1 &&
 				(hw->super.ctrl & NFP_NET_CFG_CTRL_GATHER) == 0)) {
-			PMD_TX_LOG(ERR, "Multisegment packet not supported");
+			PMD_TX_LOG(ERR, "Multisegment packet not supported.");
 			goto xmit_end;
 		}
 
@@ -381,7 +381,7 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue,
 		if (RTE_ALIGN_FLOOR(txq->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
 				RTE_ALIGN_FLOOR(txq->wr_p + used_descs - 1,
 						NFDK_TX_DESC_BLOCK_CNT)) {
-			PMD_TX_LOG(INFO, "Used descs cross block boundary");
+			PMD_TX_LOG(INFO, "Used descs cross block boundary.");
 			goto xmit_end;
 		}
 
@@ -431,7 +431,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
 	if ((NFDK_TX_DESC_PER_SIMPLE_PKT * tx_desc_sz) % NFP_ALIGN_RING_DESC != 0 ||
 			(NFDK_TX_DESC_PER_SIMPLE_PKT * nb_desc) % NFDK_TX_DESC_BLOCK_CNT != 0 ||
 			nb_desc > max_tx_desc || nb_desc < min_tx_desc) {
-		PMD_DRV_LOG(ERR, "Wrong nb_desc value");
+		PMD_DRV_LOG(ERR, "Wrong nb_desc value.");
 		return -EINVAL;
 	}
 
@@ -450,7 +450,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
 	 * calling nfp_net_stop().
 	 */
 	if (dev->data->tx_queues[queue_idx] != NULL) {
-		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d.",
 				queue_idx);
 		nfp_net_tx_queue_release(dev, queue_idx);
 		dev->data->tx_queues[queue_idx] = NULL;
@@ -460,7 +460,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
 			RTE_CACHE_LINE_SIZE, socket_id);
 	if (txq == NULL) {
-		PMD_DRV_LOG(ERR, "Error allocating tx dma");
+		PMD_DRV_LOG(ERR, "Error allocating tx dma.");
 		return -ENOMEM;
 	}
 
@@ -474,7 +474,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
 	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
 			NFP_MEMZONE_ALIGN, socket_id);
 	if (tz == NULL) {
-		PMD_DRV_LOG(ERR, "Error allocating tx dma");
+		PMD_DRV_LOG(ERR, "Error allocating tx dma.");
 		nfp_net_tx_queue_release(dev, queue_idx);
 		return -ENOMEM;
 	}
diff --git a/drivers/net/nfp/nfdk/nfp_nfdk_vec_avx2_dp.c b/drivers/net/nfp/nfdk/nfp_nfdk_vec_avx2_dp.c
index 6d1359fdb1..8354b0378b 100644
--- a/drivers/net/nfp/nfdk/nfp_nfdk_vec_avx2_dp.c
+++ b/drivers/net/nfp/nfdk/nfp_nfdk_vec_avx2_dp.c
@@ -152,7 +152,7 @@ nfp_net_nfdk_vec_avx2_xmit_simple_pkts(struct nfp_net_txq *txq,
 	struct rte_mbuf **lmbuf;
 	struct nfp_net_nfdk_tx_desc *ktxds;
 
-	PMD_TX_LOG(DEBUG, "Working for queue %hu at pos %u and %hu packets",
+	PMD_TX_LOG(DEBUG, "Working for queue %hu at pos %u and %hu packets.",
 			txq->qidx, txq->wr_p, nb_pkts);
 
 	need_txds = nb_pkts << 1;
@@ -167,7 +167,7 @@ nfp_net_nfdk_vec_avx2_xmit_simple_pkts(struct nfp_net_txq *txq,
 		return 0;
 	}
 
-	PMD_TX_LOG(DEBUG, "Queue: %hu. Sending %hu packets", txq->qidx, nb_pkts);
+	PMD_TX_LOG(DEBUG, "Queue: %hu. Sending %hu packets.", txq->qidx, nb_pkts);
 
 	/* Sending packets */
 	while (npkts < nb_pkts && free_descs >= NFDK_TX_DESC_PER_SIMPLE_PKT) {
diff --git a/drivers/net/nfp/nfp_cpp_bridge.c b/drivers/net/nfp/nfp_cpp_bridge.c
index 1cd17d0983..da7ea35d62 100644
--- a/drivers/net/nfp/nfp_cpp_bridge.c
+++ b/drivers/net/nfp/nfp_cpp_bridge.c
@@ -38,7 +38,7 @@ nfp_enable_cpp_service(struct nfp_pf_dev *pf_dev)
 
 	ret = nfp_service_enable(&cpp_service, &pf_dev->cpp_service_info);
 	if (ret != 0) {
-		PMD_INIT_LOG(DEBUG, "Could not enable service %s", cpp_service.name);
+		PMD_INIT_LOG(DEBUG, "Could not enable service %s.", cpp_service.name);
 		return ret;
 	}
 
@@ -71,7 +71,7 @@ nfp_cpp_bridge_serve_write(int sockfd,
 	uint32_t tmpbuf[16];
 	struct nfp_cpp_area *area;
 
-	PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu.", __func__,
 			sizeof(off_t), sizeof(size_t));
 
 	/* Reading the count param */
@@ -90,9 +90,9 @@ nfp_cpp_bridge_serve_write(int sockfd,
 	cpp_id = (offset >> 40) << 8;
 	nfp_offset = offset & ((1ull << 40) - 1);
 
-	PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd", __func__, count,
+	PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd.", __func__, count,
 			offset);
-	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd.", __func__,
 			cpp_id, nfp_offset);
 
 	/* Adjust length if not aligned */
@@ -107,14 +107,14 @@ nfp_cpp_bridge_serve_write(int sockfd,
 		area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
 				nfp_offset, curlen);
 		if (area == NULL) {
-			PMD_CPP_LOG(ERR, "Area alloc fail");
+			PMD_CPP_LOG(ERR, "Area alloc failed.");
 			return -EIO;
 		}
 
 		/* Mapping the target */
 		err = nfp_cpp_area_acquire(area);
 		if (err < 0) {
-			PMD_CPP_LOG(ERR, "Area acquire failed");
+			PMD_CPP_LOG(ERR, "Area acquire failed.");
 			nfp_cpp_area_free(area);
 			return -EIO;
 		}
@@ -124,11 +124,11 @@ nfp_cpp_bridge_serve_write(int sockfd,
 			if (len > sizeof(tmpbuf))
 				len = sizeof(tmpbuf);
 
-			PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu", __func__,
+			PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu.", __func__,
 					len, count);
 			err = recv(sockfd, tmpbuf, len, MSG_WAITALL);
 			if (err != (int)len) {
-				PMD_CPP_LOG(ERR, "Error when receiving, %d of %zu",
+				PMD_CPP_LOG(ERR, "Error when receiving, %d of %zu.",
 						err, count);
 				nfp_cpp_area_release(area);
 				nfp_cpp_area_free(area);
@@ -137,7 +137,7 @@ nfp_cpp_bridge_serve_write(int sockfd,
 
 			err = nfp_cpp_area_write(area, pos, tmpbuf, len);
 			if (err < 0) {
-				PMD_CPP_LOG(ERR, "The nfp_cpp_area_write error");
+				PMD_CPP_LOG(ERR, "The nfp_cpp_area_write error.");
 				nfp_cpp_area_release(area);
 				nfp_cpp_area_free(area);
 				return -EIO;
@@ -177,7 +177,7 @@ nfp_cpp_bridge_serve_read(int sockfd,
 	uint32_t tmpbuf[16];
 	struct nfp_cpp_area *area;
 
-	PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu.", __func__,
 			sizeof(off_t), sizeof(size_t));
 
 	/* Reading the count param */
@@ -196,9 +196,9 @@ nfp_cpp_bridge_serve_read(int sockfd,
 	cpp_id = (offset >> 40) << 8;
 	nfp_offset = offset & ((1ull << 40) - 1);
 
-	PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd", __func__, count,
+	PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd.", __func__, count,
 			offset);
-	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd.", __func__,
 			cpp_id, nfp_offset);
 
 	/* Adjust length if not aligned */
@@ -212,13 +212,13 @@ nfp_cpp_bridge_serve_read(int sockfd,
 		area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
 				nfp_offset, curlen);
 		if (area == NULL) {
-			PMD_CPP_LOG(ERR, "Area alloc failed");
+			PMD_CPP_LOG(ERR, "Area alloc failed.");
 			return -EIO;
 		}
 
 		err = nfp_cpp_area_acquire(area);
 		if (err < 0) {
-			PMD_CPP_LOG(ERR, "Area acquire failed");
+			PMD_CPP_LOG(ERR, "Area acquire failed.");
 			nfp_cpp_area_free(area);
 			return -EIO;
 		}
@@ -230,17 +230,17 @@ nfp_cpp_bridge_serve_read(int sockfd,
 
 			err = nfp_cpp_area_read(area, pos, tmpbuf, len);
 			if (err < 0) {
-				PMD_CPP_LOG(ERR, "The nfp_cpp_area_read error");
+				PMD_CPP_LOG(ERR, "The nfp_cpp_area_read failed.");
 				nfp_cpp_area_release(area);
 				nfp_cpp_area_free(area);
 				return -EIO;
 			}
-			PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu", __func__,
+			PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu.", __func__,
 					len, count);
 
 			err = send(sockfd, tmpbuf, len, 0);
 			if (err != (int)len) {
-				PMD_CPP_LOG(ERR, "Error when sending: %d of %zu",
+				PMD_CPP_LOG(ERR, "Error when sending: %d of %zu.",
 						err, count);
 				nfp_cpp_area_release(area);
 				nfp_cpp_area_free(area);
@@ -278,39 +278,39 @@ nfp_cpp_bridge_serve_ioctl(int sockfd,
 	/* Reading now the IOCTL command */
 	err = recv(sockfd, &cmd, 4, 0);
 	if (err != 4) {
-		PMD_CPP_LOG(ERR, "Read error from socket");
+		PMD_CPP_LOG(ERR, "Read error from socket.");
 		return -EIO;
 	}
 
 	/* Only supporting NFP_IOCTL_CPP_IDENTIFICATION */
 	if (cmd != NFP_IOCTL_CPP_IDENTIFICATION) {
-		PMD_CPP_LOG(ERR, "Unknown cmd %d", cmd);
+		PMD_CPP_LOG(ERR, "Unknown cmd %d.", cmd);
 		return -EINVAL;
 	}
 
 	err = recv(sockfd, &ident_size, 4, 0);
 	if (err != 4) {
-		PMD_CPP_LOG(ERR, "Read error from socket");
+		PMD_CPP_LOG(ERR, "Read error from socket.");
 		return -EIO;
 	}
 
 	tmp = nfp_cpp_model(cpp);
 
-	PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x", __func__, tmp);
+	PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x.", __func__, tmp);
 
 	err = send(sockfd, &tmp, 4, 0);
 	if (err != 4) {
-		PMD_CPP_LOG(ERR, "Error writing to socket");
+		PMD_CPP_LOG(ERR, "Error writing to socket.");
 		return -EIO;
 	}
 
 	tmp = nfp_cpp_interface(cpp);
 
-	PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x", __func__, tmp);
+	PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x.", __func__, tmp);
 
 	err = send(sockfd, &tmp, 4, 0);
 	if (err != 4) {
-		PMD_CPP_LOG(ERR, "Error writing to socket");
+		PMD_CPP_LOG(ERR, "Error writing to socket.");
 		return -EIO;
 	}
 
@@ -347,7 +347,7 @@ nfp_cpp_bridge_service_func(void *args)
 	unlink(socket_handle);
 	sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
 	if (sockfd < 0) {
-		PMD_CPP_LOG(ERR, "Socket creation error. Service failed");
+		PMD_CPP_LOG(ERR, "Socket creation error. Service failed.");
 		return -EIO;
 	}
 
@@ -361,14 +361,14 @@ nfp_cpp_bridge_service_func(void *args)
 	ret = bind(sockfd, (const struct sockaddr *)&address,
 			sizeof(struct sockaddr));
 	if (ret < 0) {
-		PMD_CPP_LOG(ERR, "Bind error (%d). Service failed", errno);
+		PMD_CPP_LOG(ERR, "Bind error (%d). Service failed.", errno);
 		close(sockfd);
 		return ret;
 	}
 
 	ret = listen(sockfd, 20);
 	if (ret < 0) {
-		PMD_CPP_LOG(ERR, "Listen error(%d). Service failed", errno);
+		PMD_CPP_LOG(ERR, "Listen error (%d). Service failed.", errno);
 		close(sockfd);
 		return ret;
 	}
@@ -380,8 +380,8 @@ nfp_cpp_bridge_service_func(void *args)
 			if (errno == EAGAIN || errno == EWOULDBLOCK)
 				continue;
 
-			PMD_CPP_LOG(ERR, "Accept call error (%d)", errno);
-			PMD_CPP_LOG(ERR, "Service failed");
+			PMD_CPP_LOG(ERR, "Accept call error (%d).", errno);
+			PMD_CPP_LOG(ERR, "Service failed.");
 			close(sockfd);
 			return -EIO;
 		}
@@ -389,11 +389,11 @@ nfp_cpp_bridge_service_func(void *args)
 		for (;;) {
 			ret = recv(datafd, &op, 4, 0);
 			if (ret <= 0) {
-				PMD_CPP_LOG(DEBUG, "%s: socket close", __func__);
+				PMD_CPP_LOG(DEBUG, "%s: socket close.", __func__);
 				break;
 			}
 
-			PMD_CPP_LOG(DEBUG, "%s: getting op %u", __func__, op);
+			PMD_CPP_LOG(DEBUG, "%s: getting op %u.", __func__, op);
 
 			if (op == NFP_BRIDGE_OP_READ)
 				nfp_cpp_bridge_serve_read(datafd, cpp);
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 2b55076a18..f2e7bc1eb4 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -61,7 +61,7 @@ nfp_devarg_handle_int(const char *key,
 
 	*num = strtoul(value, &end_ptr, 10);
 	if (*num == ULONG_MAX) {
-		PMD_DRV_LOG(ERR, "%s: '%s' is not a valid param", key, value);
+		PMD_DRV_LOG(ERR, "%s: '%s' is not a valid param.", key, value);
 		return -ERANGE;
 	} else if (value == end_ptr) {
 		return -EPERM;
@@ -84,7 +84,7 @@ nfp_devarg_parse_bool_para(struct rte_kvargs *kvlist,
 		return 0;
 
 	if (count > 1) {
-		PMD_DRV_LOG(ERR, "Too much bool arguments: %s", key_match);
+		PMD_DRV_LOG(ERR, "Too many bool arguments: %s.", key_match);
 		return -EINVAL;
 	}
 
@@ -97,7 +97,7 @@ nfp_devarg_parse_bool_para(struct rte_kvargs *kvlist,
 	} else if (value == 0) {
 		*value_ret = false;
 	} else {
-		PMD_DRV_LOG(ERR, "The param does not work, the format is %s=0/1",
+		PMD_DRV_LOG(ERR, "The param does not work, the format is %s=0/1.",
 				key_match);
 		return -EINVAL;
 	}
@@ -389,7 +389,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.intr_conf.rxq != 0) {
 		if (app_fw_nic->multiport) {
 			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
-					"with NFP multiport PF");
+					"with NFP multiport PF.");
 				return -EINVAL;
 		}
 
@@ -403,7 +403,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 
 			if (dev->data->nb_rx_queues > 1) {
 				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
-						"supports 1 queue with UIO");
+						"supports 1 queue with UIO.");
 				return -EIO;
 			}
 		}
@@ -418,7 +418,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 
 	/* Checking MTU set */
 	if (dev->data->mtu > net_hw->flbufsz) {
-		PMD_INIT_LOG(ERR, "MTU (%u) can not be larger than the current NFP_FRAME_SIZE (%u)",
+		PMD_INIT_LOG(ERR, "MTU (%u) can not be larger than the current NFP_FRAME_SIZE (%u).",
 				dev->data->mtu, net_hw->flbufsz);
 		return -ERANGE;
 	}
@@ -573,7 +573,7 @@ nfp_net_beat_timer(void *arg)
 	/* Beat once per second. */
 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
 			(void *)multi_pf) < 0) {
-		PMD_DRV_LOG(ERR, "Error setting alarm");
+		PMD_DRV_LOG(ERR, "Error setting alarm.");
 	}
 }
 
@@ -620,7 +620,7 @@ nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf)
 {
 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
 			(void *)multi_pf) < 0) {
-		PMD_DRV_LOG(ERR, "Error setting alarm");
+		PMD_DRV_LOG(ERR, "Error setting alarm.");
 		return -EIO;
 	}
 
@@ -863,20 +863,20 @@ nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
 	tnl_type   = tunnel_udp->prot_type;
 
 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
-		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
+		PMD_DRV_LOG(ERR, "Not VXLAN tunnel.");
 		return -ENOTSUP;
 	}
 
 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
+		PMD_DRV_LOG(ERR, "Failed to find valid vxlan idx.");
 		return -EINVAL;
 	}
 
 	if (hw->vxlan_usecnt[idx] == 0) {
 		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "Failed set vxlan port");
+			PMD_DRV_LOG(ERR, "Failed to set vxlan port.");
 			return -EINVAL;
 		}
 	}
@@ -901,13 +901,13 @@ nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
 	tnl_type   = tunnel_udp->prot_type;
 
 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
-		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
+		PMD_DRV_LOG(ERR, "Not VXLAN tunnel.");
 		return -ENOTSUP;
 	}
 
 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
 	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
-		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
+		PMD_DRV_LOG(ERR, "Failed to find valid vxlan idx.");
 		return -EINVAL;
 	}
 
@@ -916,7 +916,7 @@ nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
 	if (hw->vxlan_usecnt[idx] == 0) {
 		ret = nfp_net_set_vxlan_port(hw, idx, 0);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "Failed set vxlan port");
+			PMD_DRV_LOG(ERR, "Failed to set vxlan port.");
 			return -EINVAL;
 		}
 	}
@@ -1023,14 +1023,14 @@ nfp_net_init(struct rte_eth_dev *eth_dev,
 
 	port = net_hw->idx;
 	if (port > 7) {
-		PMD_DRV_LOG(ERR, "Port value is wrong");
+		PMD_DRV_LOG(ERR, "Port value is wrong.");
 		return -ENODEV;
 	}
 
 	hw = &net_hw->super;
 
 	PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, "
-			"NFP internal port number: %d", port, net_hw->nfp_idx);
+			"NFP internal port number: %d.", port, net_hw->nfp_idx);
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
@@ -1042,8 +1042,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev,
 	net_hw->mac_stats = pf_dev->mac_stats_bar +
 				(net_hw->nfp_idx * NFP_MAC_STATS_SIZE);
 
-	PMD_INIT_LOG(DEBUG, "Ctrl bar: %p", hw->ctrl_bar);
-	PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats);
+	PMD_INIT_LOG(DEBUG, "Ctrl bar: %p.", hw->ctrl_bar);
+	PMD_INIT_LOG(DEBUG, "MAC stats: %p.", net_hw->mac_stats);
 
 	err = nfp_net_common_init(pf_dev, net_hw);
 	if (err != 0)
@@ -1051,13 +1051,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev,
 
 	err = nfp_net_tlv_caps_parse(eth_dev);
 	if (err != 0) {
-		PMD_INIT_LOG(ERR, "Failed to parser TLV caps");
+		PMD_INIT_LOG(ERR, "Failed to parse TLV caps.");
 		return err;
 	}
 
 	err = nfp_ipsec_init(eth_dev);
 	if (err != 0) {
-		PMD_INIT_LOG(ERR, "Failed to init IPsec module");
+		PMD_INIT_LOG(ERR, "Failed to init IPsec module.");
 		return err;
 	}
 
@@ -1079,7 +1079,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev,
 	net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
 	net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
 
-	PMD_INIT_LOG(DEBUG, "The ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
+	PMD_INIT_LOG(DEBUG, "The ctrl_bar: %p, tx_bar: %p, rx_bar: %p.",
 			hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);
 
 	nfp_net_cfg_queue_setup(net_hw);
@@ -1097,7 +1097,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev,
 	if ((port == 0 || pf_dev->multi_pf.enabled)) {
 		err = nfp_net_vf_config_app_init(net_hw, pf_dev);
 		if (err != 0) {
-			PMD_INIT_LOG(ERR, "Failed to init sriov module");
+			PMD_INIT_LOG(ERR, "Failed to init sriov module.");
 			goto xstats_free;
 		}
 	}
@@ -1105,7 +1105,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev,
 	/* Allocating memory for mac addr */
 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to space for MAC address");
+		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address.");
 		err = -ENOMEM;
 		goto xstats_free;
 	}
@@ -1120,7 +1120,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev,
 	nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
 
 	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
-		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
+		PMD_INIT_LOG(INFO, "Using random mac address for port %d.", port);
 		/* Using random mac addresses for VFs */
 		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
 		nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
@@ -1153,7 +1153,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev,
 	if ((hw->cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) {
 		err = nfp_net_flow_priv_init(pf_dev, port);
 		if (err != 0) {
-			PMD_INIT_LOG(ERR, "Init net flow priv failed");
+			PMD_INIT_LOG(ERR, "Init net flow priv failed.");
 			goto txrwb_free;
 		}
 	}
@@ -1182,7 +1182,7 @@ nfp_net_device_activate(struct nfp_pf_dev *pf_dev)
 	if (multi_pf->enabled && multi_pf->function_id != 0) {
 		nsp = nfp_nsp_open(pf_dev->cpp);
 		if (nsp == NULL) {
-			PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
+			PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
 			return -EIO;
 		}
 
@@ -1224,7 +1224,7 @@ nfp_fw_get_name(struct nfp_pf_dev *pf_dev,
 			cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);
 	snprintf(fw_name, fw_size, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
 
-	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name);
 	if (access(fw_name, F_OK) == 0)
 		return 0;
 
@@ -1232,7 +1232,7 @@ nfp_fw_get_name(struct nfp_pf_dev *pf_dev,
 	snprintf(fw_name, fw_size, "%s/pci-%s.nffw", DEFAULT_FW_PATH,
 			pf_dev->pci_dev->name);
 
-	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name);
 	if (access(fw_name, F_OK) == 0)
 		return 0;
 
@@ -1240,7 +1240,7 @@ nfp_fw_get_name(struct nfp_pf_dev *pf_dev,
 	if (nfp_fw_model == NULL) {
 		nfp_fw_model = nfp_hwinfo_lookup(pf_dev->hwinfo, "assembly.partno");
 		if (nfp_fw_model == NULL) {
-			PMD_DRV_LOG(ERR, "Firmware model NOT found");
+			PMD_DRV_LOG(ERR, "Firmware model NOT found.");
 			return -EIO;
 		}
 	}
@@ -1248,7 +1248,7 @@ nfp_fw_get_name(struct nfp_pf_dev *pf_dev,
 	/* And then try the model name */
 	snprintf(card_desc, sizeof(card_desc), "%s.nffw", nfp_fw_model);
 	snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card_desc);
-	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name);
 	if (access(fw_name, F_OK) == 0)
 		return 0;
 
@@ -1257,7 +1257,7 @@ nfp_fw_get_name(struct nfp_pf_dev *pf_dev,
 			nfp_fw_model, pf_dev->nfp_eth_table->count,
 			pf_dev->nfp_eth_table->ports[0].speed / 1000);
 	snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card_desc);
-	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name);
 	if (access(fw_name, F_OK) == 0)
 		return 0;
 
@@ -1278,7 +1278,7 @@ nfp_fw_upload(struct nfp_nsp *nsp,
 		return -ENOENT;
 	}
 
-	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
+	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu.",
 			fw_name, fsize);
 	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
 	if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) {
@@ -1287,7 +1287,7 @@ nfp_fw_upload(struct nfp_nsp *nsp,
 		return -EIO;
 	}
 
-	PMD_DRV_LOG(INFO, "Done");
+	PMD_DRV_LOG(INFO, "Done.");
 
 	free(fw_buf);
 
@@ -1327,11 +1327,11 @@ nfp_fw_check_change(struct nfp_cpp *cpp,
 	nfp_net_get_fw_version(cpp, &old_version);
 
 	if (new_version != old_version) {
-		PMD_DRV_LOG(INFO, "FW version is changed, new %u, old %u",
+		PMD_DRV_LOG(INFO, "FW version is changed, new %u, old %u.",
 				new_version, old_version);
 		*fw_changed = true;
 	} else {
-		PMD_DRV_LOG(INFO, "FW version is not changed and is %u", new_version);
+		PMD_DRV_LOG(INFO, "FW version is not changed and is %u.", new_version);
 		*fw_changed = false;
 	}
 
@@ -1380,7 +1380,7 @@ nfp_fw_reload(struct nfp_nsp *nsp,
 	if (reset_flag) {
 		err = nfp_nsp_device_soft_reset(nsp);
 		if (err != 0) {
-			PMD_DRV_LOG(ERR, "NFP firmware soft reset failed");
+			PMD_DRV_LOG(ERR, "NFP firmware soft reset failed.");
 			return err;
 		}
 	}
@@ -1395,7 +1395,7 @@ nfp_fw_reload(struct nfp_nsp *nsp,
 
 	err = nfp_fw_upload(nsp, fw_name);
 	if (err != 0) {
-		PMD_DRV_LOG(ERR, "NFP firmware load failed");
+		PMD_DRV_LOG(ERR, "NFP firmware load failed.");
 		return err;
 	}
 
@@ -1447,7 +1447,7 @@ nfp_fw_skip_load(const struct nfp_dev_info *dev_info,
 				beat[port_num] = 0;
 				if (*reload_fw) {
 					*reload_fw = false;
-					PMD_DRV_LOG(ERR, "The param %s does not work",
+					PMD_DRV_LOG(ERR, "The param %s does not work.",
 							NFP_PF_FORCE_RELOAD_FW);
 				}
 			}
@@ -1581,13 +1581,13 @@ nfp_fw_reload_for_multi_pf(struct nfp_nsp *nsp,
 
 	err = nfp_net_keepalive_init(pf_dev->cpp, multi_pf);
 	if (err != 0) {
-		PMD_DRV_LOG(ERR, "NFP init beat failed");
+		PMD_DRV_LOG(ERR, "NFP init beat failed.");
 		return err;
 	}
 
 	err = nfp_net_keepalive_start(multi_pf);
 	if (err != 0) {
-		PMD_DRV_LOG(ERR, "NFP write beat failed");
+		PMD_DRV_LOG(ERR, "NFP write beat failed.");
 		goto keepalive_uninit;
 	}
 
@@ -1660,7 +1660,7 @@ nfp_fw_policy_value_get(struct nfp_nsp *nsp,
 
 	ret = nfp_strtol(buf, 0, &val);
 	if (ret != 0 || val < 0 || val > max_val) {
-		PMD_DRV_LOG(WARNING, "Invalid value '%s' from '%s', ignoring",
+		PMD_DRV_LOG(WARNING, "Invalid value '%s' from '%s', ignoring.",
 				buf, key);
 		/* Fall back to the default value */
 		ret = nfp_strtol(default_val, 0, &val);
@@ -1685,7 +1685,7 @@ nfp_fw_setup(struct nfp_pf_dev *pf_dev,
 
 	nsp = nfp_nsp_open(pf_dev->cpp);
 	if (nsp == NULL) {
-		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
+		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
 		return -EIO;
 	}
 
@@ -1744,7 +1744,7 @@ nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev,
 
 	nsp = nfp_nsp_open(cpp);
 	if (nsp == NULL) {
-		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
+		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
 		return false;
 	}
 
@@ -1776,7 +1776,7 @@ nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev)
 	ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, name, pf_dev->ctrl_bar_size,
 			&area);
 	if (ctrl_bar == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol");
+		PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol.");
 		return -ENODEV;
 	}
 
@@ -1798,7 +1798,7 @@ nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev)
 
 	cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
 	if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) {
-		PMD_INIT_LOG(ERR, "Loaded firmware does not support multiple PF");
+		PMD_INIT_LOG(ERR, "Loaded firmware does not support multiple PF.");
 		err = -EINVAL;
 		goto end;
 	}
@@ -1822,7 +1822,7 @@ nfp_app_fw_nic_total_phyports_check(struct nfp_pf_dev *pf_dev)
 
 	if (pf_dev->multi_pf.enabled) {
 		if (!nfp_check_multi_pf_from_fw(total_phyports)) {
-			PMD_INIT_LOG(ERR, "NSP report multipf, but FW report not multipf");
+			PMD_INIT_LOG(ERR, "NSP reports multipf, but FW does not report multipf.");
 			return false;
 		}
 	} else {
@@ -1831,7 +1831,7 @@ nfp_app_fw_nic_total_phyports_check(struct nfp_pf_dev *pf_dev)
 		 * number of physical ports.
 		 */
 		if (total_phyports != pf_dev->nfp_eth_table->count) {
-			PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
+			PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs.");
 			return false;
 		}
 	}
@@ -1869,7 +1869,7 @@ nfp_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv)
 	};
 
 	nfp_eth_table = pf_dev->nfp_eth_table;
-	PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count);
+	PMD_INIT_LOG(INFO, "Total physical ports: %d.", nfp_eth_table->count);
 	id = nfp_function_id_get(pf_dev, 0);
 
 	/* Allocate memory for the CoreNIC app */
@@ -1896,12 +1896,12 @@ nfp_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv)
 			pf_dev->total_phyports * pf_dev->ctrl_bar_size,
 			&pf_dev->ctrl_area);
 	if (pf_dev->ctrl_bar == NULL) {
-		PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for %s", bar_name);
+		PMD_INIT_LOG(ERR, "The nfp_rtsym_map failed for %s.", bar_name);
 		ret = -EIO;
 		goto app_cleanup;
 	}
 
-	PMD_INIT_LOG(DEBUG, "Ctrl bar: %p", pf_dev->ctrl_bar);
+	PMD_INIT_LOG(DEBUG, "Ctrl bar: %p.", pf_dev->ctrl_bar);
 
 	/* Loop through all physical ports on PF */
 	for (i = 0; i < pf_dev->total_phyports; i++) {
@@ -2141,7 +2141,7 @@ nfp_fw_app_primary_init(struct nfp_net_hw_priv *hw_priv)
 
 	switch (pf_dev->app_fw_id) {
 	case NFP_APP_FW_CORE_NIC:
-		PMD_INIT_LOG(INFO, "Initializing coreNIC");
+		PMD_INIT_LOG(INFO, "Initializing coreNIC.");
 		ret = nfp_init_app_fw_nic(hw_priv);
 		if (ret != 0) {
 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
@@ -2149,7 +2149,7 @@ nfp_fw_app_primary_init(struct nfp_net_hw_priv *hw_priv)
 		}
 		break;
 	case NFP_APP_FW_FLOWER_NIC:
-		PMD_INIT_LOG(INFO, "Initializing Flower");
+		PMD_INIT_LOG(INFO, "Initializing Flower.");
 		ret = nfp_init_app_fw_flower(hw_priv);
 		if (ret != 0) {
 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
@@ -2157,7 +2157,7 @@ nfp_fw_app_primary_init(struct nfp_net_hw_priv *hw_priv)
 		}
 		break;
 	default:
-		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
+		PMD_INIT_LOG(ERR, "Unsupported firmware loaded.");
 		ret = -EINVAL;
 		return ret;
 	}
@@ -2197,7 +2197,7 @@ nfp_pf_get_sriov_vf(struct nfp_pf_dev *pf_dev,
 
 	pos = rte_pci_find_ext_capability(pf_dev->pci_dev, RTE_PCI_EXT_CAP_ID_SRIOV);
 	if (pos == 0) {
-		PMD_INIT_LOG(ERR, "Can not get the pci sriov cap");
+		PMD_INIT_LOG(ERR, "Can not get the pci sriov cap.");
 		return -EIO;
 	}
 
@@ -2208,7 +2208,7 @@ nfp_pf_get_sriov_vf(struct nfp_pf_dev *pf_dev,
 	ret = rte_pci_read_config(pf_dev->pci_dev, &sriov_vf, sizeof(sriov_vf),
 			pos + RTE_PCI_SRIOV_TOTAL_VF);
 	if (ret < 0) {
-		PMD_INIT_LOG(ERR, "Can not read the sriov toatl VF");
+		PMD_INIT_LOG(ERR, "Can not read the sriov toatl VF.");
 		return -EIO;
 	}
 
@@ -2216,7 +2216,7 @@ nfp_pf_get_sriov_vf(struct nfp_pf_dev *pf_dev,
 	ret = rte_pci_read_config(pf_dev->pci_dev, &offset, sizeof(offset),
 			pos + RTE_PCI_SRIOV_VF_OFFSET);
 	if (ret < 0) {
-		PMD_INIT_LOG(ERR, "Can not get the VF offset");
+		PMD_INIT_LOG(ERR, "Can not get the VF offset.");
 		return -EIO;
 	}
 
@@ -2226,7 +2226,7 @@ nfp_pf_get_sriov_vf(struct nfp_pf_dev *pf_dev,
 
 	offset -= dev_info->pf_num_per_unit;
 	if (offset >= pf_dev->max_vfs || offset + sriov_vf > pf_dev->max_vfs) {
-		PMD_INIT_LOG(ERR, "The pci allocate VF is more than the MAX VF");
+		PMD_INIT_LOG(ERR, "The pci allocate VF is more than the MAX VF.");
 		return -ERANGE;
 	}
 
@@ -2245,11 +2245,11 @@ nfp_net_get_vf_info(struct nfp_pf_dev *pf_dev,
 	ret = nfp_pf_get_max_vf(pf_dev);
 	if (ret != 0) {
 		if (ret != -ENOENT) {
-			PMD_INIT_LOG(ERR, "Read max VFs failed");
+			PMD_INIT_LOG(ERR, "Read max VFs failed.");
 			return ret;
 		}
 
-		PMD_INIT_LOG(WARNING, "The firmware can not support read max VFs");
+		PMD_INIT_LOG(WARNING, "The firmware does not support reading max VFs.");
 		return 0;
 	}
 
@@ -2335,13 +2335,13 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
 
 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
 	if (dev_info == NULL) {
-		PMD_INIT_LOG(ERR, "Not supported device ID");
+		PMD_INIT_LOG(ERR, "Not supported device ID.");
 		return -ENODEV;
 	}
 
 	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
 	if (hw_priv == NULL) {
-		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data");
+		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data.");
 		return -ENOMEM;
 	}
 
@@ -2350,7 +2350,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
 	snprintf(name, sizeof(name), "nfp_pf%u", function_id);
 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
 	if (pf_dev == NULL) {
-		PMD_INIT_LOG(ERR, "Can not allocate memory for the PF device");
+		PMD_INIT_LOG(ERR, "Can not allocate memory for the PF device.");
 		ret = -ENOMEM;
 		goto hw_priv_free;
 	}
@@ -2380,7 +2380,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);
 
 	if (cpp == NULL) {
-		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
+		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained.");
 		ret = -EIO;
 		goto sync_free;
 	}
@@ -2390,7 +2390,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
 
 	hwinfo = nfp_hwinfo_read(cpp);
 	if (hwinfo == NULL) {
-		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
+		PMD_INIT_LOG(ERR, "Error reading hwinfo table.");
 		ret = -EIO;
 		goto cpp_cleanup;
 	}
@@ -2400,13 +2400,13 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
 	/* Read the number of physical ports from hardware */
 	nfp_eth_table = nfp_eth_read_ports(cpp);
 	if (nfp_eth_table == NULL) {
-		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
+		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table.");
 		ret = -EIO;
 		goto hwinfo_cleanup;
 	}
 
 	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
-		PMD_INIT_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
+		PMD_INIT_LOG(ERR, "NFP ethernet table reports wrong ports: %u.",
 				nfp_eth_table->count);
 		ret = -EIO;
 		goto eth_table_cleanup;
@@ -2419,28 +2419,28 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
 
 	ret = nfp_net_force_port_down(pf_dev);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Failed to force port down");
+		PMD_INIT_LOG(ERR, "Failed to force port down.");
 		ret = -EIO;
 		goto eth_table_cleanup;
 	}
 
 	ret = nfp_devargs_parse(&pf_dev->devargs, pci_dev->device.devargs);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Error when parsing device args");
+		PMD_INIT_LOG(ERR, "Error when parsing device args.");
 		ret = -EINVAL;
 		goto eth_table_cleanup;
 	}
 
 	ret = nfp_net_device_activate(pf_dev);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Failed to activate the NFP device");
+		PMD_INIT_LOG(ERR, "Failed to activate the NFP device.");
 		ret = -EIO;
 		goto eth_table_cleanup;
 	}
 
 	ret = nfp_fw_setup(pf_dev, dev_info);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Error when uploading firmware");
+		PMD_INIT_LOG(ERR, "Error when uploading firmware.");
 		ret = -EIO;
 		goto eth_table_cleanup;
 	}
@@ -2448,7 +2448,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
 	/* Now the symbol table should be there */
 	sym_tbl = nfp_rtsym_table_read(cpp);
 	if (sym_tbl == NULL) {
-		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
+		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table.");
 		ret = -EIO;
 		goto fw_cleanup;
 	}
@@ -2459,7 +2459,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
 	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
 	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Could not read %s from firmware", app_name);
+		PMD_INIT_LOG(ERR, "Could not read %s from firmware.", app_name);
 		ret = -EIO;
 		goto sym_tbl_cleanup;
 	}
@@ -2496,18 +2496,18 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
 	pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id,
 			addr, dev_info->qc_area_sz, &pf_dev->qc_area);
 	if (pf_dev->qc_bar == NULL) {
-		PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for net.qc");
+		PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for net.qc.");
 		ret = -EIO;
 		goto sym_tbl_cleanup;
 	}
 
-	PMD_INIT_LOG(DEBUG, "The qc_bar address: %p", pf_dev->qc_bar);
+	PMD_INIT_LOG(DEBUG, "The qc_bar address: %p.", pf_dev->qc_bar);
 
 	pf_dev->mac_stats_bar = nfp_rtsym_map(sym_tbl, "_mac_stats",
 			NFP_MAC_STATS_SIZE * nfp_eth_table->max_index,
 			&pf_dev->mac_stats_area);
 	if (pf_dev->mac_stats_bar == NULL) {
-		PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for _mac_stats");
+		PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for _mac_stats.");
 		goto hwqueues_cleanup;
 	}
 
@@ -2603,11 +2603,11 @@ nfp_secondary_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv)
 	for (i = 0; i < total_vnics; i++) {
 		nfp_port_name_generate(port_name, sizeof(port_name), i, pf_dev);
 
-		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
+		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s.", port_name);
 		ret = rte_eth_dev_create(&pf_dev->pci_dev->device, port_name, 0,
 				NULL, NULL, nfp_secondary_net_init, hw_priv);
 		if (ret != 0) {
-			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
+			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed.", port_name);
 			goto port_cleanup;
 		}
 	}
@@ -2635,7 +2635,7 @@ nfp_fw_app_secondary_init(struct nfp_net_hw_priv *hw_priv)
 
 	switch (pf_dev->app_fw_id) {
 	case NFP_APP_FW_CORE_NIC:
-		PMD_INIT_LOG(INFO, "Initializing coreNIC");
+		PMD_INIT_LOG(INFO, "Initializing coreNIC.");
 		ret = nfp_secondary_init_app_fw_nic(hw_priv);
 		if (ret != 0) {
 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
@@ -2643,7 +2643,7 @@ nfp_fw_app_secondary_init(struct nfp_net_hw_priv *hw_priv)
 		}
 		break;
 	case NFP_APP_FW_FLOWER_NIC:
-		PMD_INIT_LOG(INFO, "Initializing Flower");
+		PMD_INIT_LOG(INFO, "Initializing Flower.");
 		ret = nfp_secondary_init_app_fw_flower(hw_priv);
 		if (ret != 0) {
 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
@@ -2651,7 +2651,7 @@ nfp_fw_app_secondary_init(struct nfp_net_hw_priv *hw_priv)
 		}
 		break;
 	default:
-		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
+		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded.");
 		ret = -EINVAL;
 		return ret;
 	}
@@ -2689,13 +2689,13 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 
 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
 	if (dev_info == NULL) {
-		PMD_INIT_LOG(ERR, "Not supported device ID");
+		PMD_INIT_LOG(ERR, "Not supported device ID.");
 		return -ENODEV;
 	}
 
 	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
 	if (hw_priv == NULL) {
-		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data");
+		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data.");
 		return -ENOMEM;
 	}
 
@@ -2704,7 +2704,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 	snprintf(name, sizeof(name), "nfp_pf%d", 0);
 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
 	if (pf_dev == NULL) {
-		PMD_INIT_LOG(ERR, "Can not allocate memory for the PF device");
+		PMD_INIT_LOG(ERR, "Can not allocate memory for the PF device.");
 		ret = -ENOMEM;
 		goto hw_priv_free;
 	}
@@ -2734,7 +2734,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);
 
 	if (cpp == NULL) {
-		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
+		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained.");
 		ret = -EIO;
 		goto sync_free;
 	}
@@ -2748,7 +2748,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 	 */
 	sym_tbl = nfp_rtsym_table_read(cpp);
 	if (sym_tbl == NULL) {
-		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
+		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table.");
 		ret = -EIO;
 		goto cpp_cleanup;
 	}
@@ -2764,7 +2764,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
 	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Could not read %s from fw", app_name);
+		PMD_INIT_LOG(ERR, "Could not read %s from fw.", app_name);
 		ret = -EIO;
 		goto sym_tbl_cleanup;
 	}
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index b5fa05fc10..4941c915e7 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -51,7 +51,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)
 
 			if (dev->data->nb_rx_queues > 1) {
 				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
-						"supports 1 queue with UIO");
+						"supports 1 queue with UIO.");
 				return -EIO;
 			}
 		}
@@ -268,7 +268,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
 
 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
 	if (dev_info == NULL) {
-		PMD_INIT_LOG(ERR, "Not supported device ID");
+		PMD_INIT_LOG(ERR, "Not supported device ID.");
 		return -ENODEV;
 	}
 
@@ -277,7 +277,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
 
 	hw->ctrl_bar = pci_dev->mem_resource[0].addr;
 	if (hw->ctrl_bar == NULL) {
-		PMD_DRV_LOG(ERR, "The hw->super.ctrl_bar is NULL. BAR0 not configured");
+		PMD_DRV_LOG(ERR, "The hw->super.ctrl_bar is NULL. BAR0 not configured.");
 		return -ENODEV;
 	}
 
@@ -298,7 +298,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
 	/* Set the ctrl bar size */
 	nfp_net_ctrl_bar_size_set(pf_dev);
 
-	PMD_INIT_LOG(DEBUG, "Ctrl bar: %p", hw->ctrl_bar);
+	PMD_INIT_LOG(DEBUG, "Ctrl bar: %p.", hw->ctrl_bar);
 
 	err = nfp_net_common_init(pf_dev, net_hw);
 	if (err != 0)
@@ -308,7 +308,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
 
 	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
 	if (hw_priv == NULL) {
-		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data");
+		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data.");
 		err = -ENOMEM;
 		goto hw_priv_free;
 	}
@@ -340,7 +340,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
 	net_hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
 	net_hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;
 
-	PMD_INIT_LOG(DEBUG, "The ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
+	PMD_INIT_LOG(DEBUG, "The ctrl_bar: %p, tx_bar: %p, rx_bar: %p.",
 			hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);
 
 	nfp_net_cfg_queue_setup(net_hw);
@@ -358,14 +358,14 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
 	/* Allocating memory for mac addr */
 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to space for MAC address");
+		PMD_INIT_LOG(ERR, "Failed to space for MAC address.");
 		err = -ENOMEM;
 		goto free_xstats;
 	}
 
 	nfp_read_mac(hw);
 	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
-		PMD_INIT_LOG(INFO, "Using random mac address for port %hu", port);
+		PMD_INIT_LOG(INFO, "Using random mac address for port %hu.", port);
 		/* Using random mac addresses for VFs */
 		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
 		nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
diff --git a/drivers/net/nfp/nfp_ipsec.c b/drivers/net/nfp/nfp_ipsec.c
index 89116af1b2..b8f6d06371 100644
--- a/drivers/net/nfp/nfp_ipsec.c
+++ b/drivers/net/nfp/nfp_ipsec.c
@@ -453,7 +453,7 @@ nfp_ipsec_cfg_cmd_issue(struct nfp_net_hw *net_hw,
 
 	ret = nfp_net_mbox_reconfig(net_hw, NFP_NET_CFG_MBOX_CMD_IPSEC);
 	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "Failed to IPsec reconfig mbox");
+		PMD_DRV_LOG(ERR, "Failed to IPsec reconfig mbox.");
 		return ret;
 	}
 
@@ -530,7 +530,7 @@ nfp_aesgcm_iv_update(struct ipsec_add_sa *cfg,
 
 	iv_str = strdup(iv_string);
 	if (iv_str == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to strdup iv_string");
+		PMD_DRV_LOG(ERR, "Failed to strdup iv_string.");
 		return;
 	}
 
@@ -616,13 +616,13 @@ nfp_aead_map(struct rte_eth_dev *eth_dev,
 		}
 
 		if (aead->digest_length != 16) {
-			PMD_DRV_LOG(ERR, "ICV must be 128bit with RTE_CRYPTO_AEAD_CHACHA20_POLY1305");
+			PMD_DRV_LOG(ERR, "ICV must be 128bit with RTE_CRYPTO_AEAD_CHACHA20_POLY1305.");
 			return -EINVAL;
 		}
 
 		/* Aead->alg_key_len includes 32-bit salt */
 		if (key_length != 32) {
-			PMD_DRV_LOG(ERR, "Unsupported CHACHA20 key length");
+			PMD_DRV_LOG(ERR, "Unsupported CHACHA20 key length.");
 			return -EINVAL;
 		}
 
@@ -659,7 +659,7 @@ nfp_aead_map(struct rte_eth_dev *eth_dev,
 	if (iv_str != NULL) {
 		iv_len = aead->iv.length;
 		if (iv_len > NFP_ESP_IV_LENGTH) {
-			PMD_DRV_LOG(ERR, "Unsupported length of iv data");
+			PMD_DRV_LOG(ERR, "Unsupported length of iv data.");
 			return -EINVAL;
 		}
 
@@ -715,7 +715,7 @@ nfp_cipher_map(struct rte_eth_dev *eth_dev,
 
 	key = (const rte_be32_t *)(cipher->key.data);
 	if (key_length > sizeof(cfg->cipher_key)) {
-		PMD_DRV_LOG(ERR, "Insufficient space for offloaded key");
+		PMD_DRV_LOG(ERR, "Insufficient space for offloaded key.");
 		return -EINVAL;
 	}
 
@@ -858,7 +858,7 @@ nfp_auth_map(struct rte_eth_dev *eth_dev,
 	}
 
 	if (digest_length == 0) {
-		PMD_DRV_LOG(ERR, "Unsupported authentication algorithm digest length");
+		PMD_DRV_LOG(ERR, "Unsupported authentication algorithm digest length.");
 		return -EINVAL;
 	}
 
@@ -1013,7 +1013,7 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev,
 		cfg->ctrl_word.encap_dsbl = 0;
 		break;
 	default:
-		PMD_DRV_LOG(ERR, "Unsupported IPsec action for offload, action: %d",
+		PMD_DRV_LOG(ERR, "Unsupported IPsec action for offload, action: %d.",
 				conf->action_type);
 		return -EINVAL;
 	}
@@ -1026,7 +1026,7 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev,
 		cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_AH;
 		break;
 	default:
-		PMD_DRV_LOG(ERR, "Unsupported IPsec protocol for offload, protocol: %d",
+		PMD_DRV_LOG(ERR, "Unsupported IPsec protocol for offload, protocol: %d.",
 				conf->ipsec.proto);
 		return -EINVAL;
 	}
@@ -1062,7 +1062,7 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev,
 
 		break;
 	default:
-		PMD_DRV_LOG(ERR, "Unsupported IPsec mode for offload, mode: %d",
+		PMD_DRV_LOG(ERR, "Unsupported IPsec mode for offload, mode: %d.",
 				conf->ipsec.mode);
 		return -EINVAL;
 	}
@@ -1100,7 +1100,7 @@ nfp_crypto_create_session(void *device,
 	net_hw = eth_dev->data->dev_private;
 
 	if (net_hw->ipsec_data->sa_free_cnt == 0) {
-		PMD_DRV_LOG(ERR, "No space in SA table, spi: %d", conf->ipsec.spi);
+		PMD_DRV_LOG(ERR, "No space in SA table, spi: %d.", conf->ipsec.spi);
 		return -EINVAL;
 	}
 
@@ -1122,7 +1122,7 @@ nfp_crypto_create_session(void *device,
 	msg.sa_idx = sa_idx;
 	ret = nfp_ipsec_cfg_cmd_issue(net_hw, &msg);
 	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "Failed to add SA to nic");
+		PMD_DRV_LOG(ERR, "Failed to add SA to nic.");
 		return -EINVAL;
 	}
 
@@ -1255,7 +1255,7 @@ nfp_security_session_get_stats(void *device,
 
 	ret = nfp_ipsec_cfg_cmd_issue(net_hw, &msg);
 	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "Failed to get SA stats");
+		PMD_DRV_LOG(ERR, "Failed to get SA stats.");
 		return ret;
 	}
 
@@ -1330,13 +1330,13 @@ nfp_crypto_remove_session(void *device,
 	eth_dev = device;
 	priv_session = SECURITY_GET_SESS_PRIV(session);
 	if (eth_dev != priv_session->dev) {
-		PMD_DRV_LOG(ERR, "Session not bound to this device");
+		PMD_DRV_LOG(ERR, "Session not bound to this device.");
 		return -ENODEV;
 	}
 
 	ret = nfp_crypto_remove_sa(eth_dev, priv_session);
 	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "Failed to remove session");
+		PMD_DRV_LOG(ERR, "Failed to remove session.");
 		return -EFAULT;
 	}
 
@@ -1369,7 +1369,7 @@ nfp_ipsec_ctx_create(struct rte_eth_dev *dev,
 	ctx = rte_zmalloc("security_ctx",
 			sizeof(struct rte_security_ctx), 0);
 	if (ctx == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to malloc security_ctx");
+		PMD_INIT_LOG(ERR, "Failed to malloc security_ctx.");
 		return -ENOMEM;
 	}
 
@@ -1380,7 +1380,7 @@ nfp_ipsec_ctx_create(struct rte_eth_dev *dev,
 
 	data->pkt_dynfield_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
 	if (data->pkt_dynfield_offset < 0) {
-		PMD_INIT_LOG(ERR, "Failed to register mbuf esn_dynfield");
+		PMD_INIT_LOG(ERR, "Failed to register mbuf esn_dynfield.");
 		return -ENOMEM;
 	}
 
@@ -1399,13 +1399,13 @@ nfp_ipsec_init(struct rte_eth_dev *dev)
 
 	cap_extend = net_hw->super.cap_ext;
 	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) == 0) {
-		PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability");
+		PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability.");
 		return 0;
 	}
 
 	data = rte_zmalloc("ipsec_data", sizeof(struct nfp_net_ipsec_data), 0);
 	if (data == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to malloc ipsec_data");
+		PMD_INIT_LOG(ERR, "Failed to malloc ipsec_data.");
 		return -ENOMEM;
 	}
 
@@ -1415,7 +1415,7 @@ nfp_ipsec_init(struct rte_eth_dev *dev)
 
 	ret = nfp_ipsec_ctx_create(dev, data);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Failed to create IPsec ctx");
+		PMD_INIT_LOG(ERR, "Failed to create IPsec ctx.");
 		goto ipsec_cleanup;
 	}
 
@@ -1445,7 +1445,7 @@ nfp_ipsec_uninit(struct rte_eth_dev *dev)
 
 	cap_extend = net_hw->super.cap_ext;
 	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) == 0) {
-		PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability");
+		PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability.");
 		return;
 	}
 
diff --git a/drivers/net/nfp/nfp_mtr.c b/drivers/net/nfp/nfp_mtr.c
index ae7f9558be..d4f2c4f2f0 100644
--- a/drivers/net/nfp/nfp_mtr.c
+++ b/drivers/net/nfp/nfp_mtr.c
@@ -43,7 +43,7 @@ nfp_mtr_cap_get(struct rte_eth_dev *dev __rte_unused,
 	if (cap == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
-				NULL, "NULL pointer for capabilitie argument");
+				NULL, "NULL pointer for capabilitie argument.");
 	}
 
 	memset(cap, 0, sizeof(struct rte_mtr_capabilities));
@@ -78,14 +78,14 @@ nfp_mtr_profile_validate(uint32_t mtr_profile_id,
 	if (profile == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_PROFILE,
-				NULL, "Meter profile is null");
+				NULL, "Meter profile is null.");
 	}
 
 	/* Meter profile ID must be valid. */
 	if (mtr_profile_id >= NFP_MAX_PROFILE_CNT) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
-				NULL, "Meter profile id not valid");
+				NULL, "Meter profile id not valid.");
 	}
 
 	switch (profile->alg) {
@@ -95,11 +95,11 @@ nfp_mtr_profile_validate(uint32_t mtr_profile_id,
 	case RTE_MTR_TRTCM_RFC4115:
 		return -rte_mtr_error_set(error, ENOTSUP,
 				RTE_MTR_ERROR_TYPE_METER_PROFILE,
-				NULL, "Unsupported metering algorithm");
+				NULL, "Unsupported metering algorithm.");
 	default:
 		return -rte_mtr_error_set(error, ENOTSUP,
 				RTE_MTR_ERROR_TYPE_METER_PROFILE,
-				NULL, "Unknown metering algorithm");
+				NULL, "Unknown metering algorithm.");
 	}
 }
 
@@ -202,7 +202,7 @@ nfp_mtr_profile_insert(struct nfp_app_fw_flower *app_fw_flower,
 	if (mtr_profile == NULL) {
 		return -rte_mtr_error_set(error, ENOMEM,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
-				NULL, "Meter profile alloc failed");
+				NULL, "Meter profile alloc failed.");
 	}
 
 	ret = nfp_mtr_profile_conf_insert(mtr_profile_id,
@@ -210,7 +210,7 @@ nfp_mtr_profile_insert(struct nfp_app_fw_flower *app_fw_flower,
 	if (ret != 0) {
 		rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
-				NULL, "Insert profile config failed");
+				NULL, "Insert profile config failed.");
 		goto free_profile;
 	}
 
@@ -218,7 +218,7 @@ nfp_mtr_profile_insert(struct nfp_app_fw_flower *app_fw_flower,
 	if (ret != 0) {
 		rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
-				NULL, "Add meter to firmware failed");
+				NULL, "Add meter to firmware failed.");
 		goto free_profile;
 	}
 
@@ -252,7 +252,7 @@ nfp_mtr_profile_mod(struct nfp_app_fw_flower *app_fw_flower,
 	if (ret != 0) {
 		rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
-				NULL, "Mod profile config failed");
+				NULL, "Mod profile config failed.");
 		goto rollback;
 	}
 
@@ -260,7 +260,7 @@ nfp_mtr_profile_mod(struct nfp_app_fw_flower *app_fw_flower,
 	if (ret != 0) {
 		rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
-				NULL, "Mod meter to firmware failed");
+				NULL, "Mod meter to firmware failed.");
 		goto rollback;
 	}
 
@@ -354,20 +354,20 @@ nfp_mtr_profile_delete(struct rte_eth_dev *dev,
 	if (mtr_profile == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
-				NULL, "Request meter profile not exist");
+				NULL, "Request meter profile not exist.");
 	}
 
 	if (mtr_profile->in_use) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_PROFILE,
-				NULL, "Request meter profile is been used");
+				NULL, "Request meter profile is been used.");
 	}
 
 	ret = nfp_flower_cmsg_qos_delete(app_fw_flower, &mtr_profile->conf);
 	if (ret != 0) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
-				NULL, "Delete meter from firmware failed");
+				NULL, "Delete meter from firmware failed.");
 	}
 
 	/* Remove profile from profile list */
@@ -417,7 +417,7 @@ nfp_mtr_policy_validate(uint32_t mtr_policy_id,
 	if (action != NULL && action->type != RTE_FLOW_ACTION_TYPE_VOID) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_POLICY,
-				NULL, "Green action must be void or end");
+				NULL, "Green action must be void or end.");
 	}
 
 	/* Check yellow action
@@ -427,7 +427,7 @@ nfp_mtr_policy_validate(uint32_t mtr_policy_id,
 	if (action != NULL && action->type != RTE_FLOW_ACTION_TYPE_VOID) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_POLICY,
-				NULL, "Yellow action must be void or end");
+				NULL, "Yellow action must be void or end.");
 	}
 
 	/* Check red action */
@@ -435,7 +435,7 @@ nfp_mtr_policy_validate(uint32_t mtr_policy_id,
 	if (action == NULL || action->type != RTE_FLOW_ACTION_TYPE_DROP) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_POLICY,
-				NULL, "Red action must be drop");
+				NULL, "Red action must be drop.");
 	}
 
 	return 0;
@@ -475,7 +475,7 @@ nfp_mtr_policy_add(struct rte_eth_dev *dev,
 	if (mtr_policy != NULL) {
 		return -rte_mtr_error_set(error, EEXIST,
 				RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
-				NULL, "Meter policy already exist");
+				NULL, "Meter policy already exist.");
 	}
 
 	/* Check input params */
@@ -488,7 +488,7 @@ nfp_mtr_policy_add(struct rte_eth_dev *dev,
 	if (mtr_policy == NULL) {
 		return -rte_mtr_error_set(error, ENOMEM,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
-				NULL, "Meter policy alloc failed");
+				NULL, "Meter policy alloc failed.");
 	}
 
 	mtr_policy->policy_id = mtr_policy_id;
@@ -531,13 +531,13 @@ nfp_mtr_policy_delete(struct rte_eth_dev *dev,
 	if (mtr_policy == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
-				NULL, "Request meter policy not exist");
+				NULL, "Request meter policy not exist.");
 	}
 
 	if (mtr_policy->ref_cnt > 0) {
 		return -rte_mtr_error_set(error, EBUSY,
 				RTE_MTR_ERROR_TYPE_METER_POLICY,
-				NULL, "Request mtr policy is been used");
+				NULL, "Request mtr policy is been used.");
 	}
 
 	/* Remove profile from profile list */
@@ -577,25 +577,25 @@ nfp_mtr_stats_mask_validate(uint64_t stats_mask, struct rte_mtr_error *error)
 	if ((stats_mask & RTE_MTR_STATS_N_PKTS_YELLOW) != 0) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_PARAMS,
-				NULL, "RTE_MTR_STATS_N_PKTS_YELLOW not support");
+				NULL, "RTE_MTR_STATS_N_PKTS_YELLOW not support.");
 	}
 
 	if ((stats_mask & RTE_MTR_STATS_N_PKTS_RED) != 0) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_PARAMS,
-				NULL, "RTE_MTR_STATS_N_PKTS_RED not support");
+				NULL, "RTE_MTR_STATS_N_PKTS_RED not support.");
 	}
 
 	if ((stats_mask & RTE_MTR_STATS_N_BYTES_YELLOW) != 0) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_PARAMS,
-				NULL, "RTE_MTR_STATS_N_BYTES_YELLOW not support");
+				NULL, "RTE_MTR_STATS_N_BYTES_YELLOW not support.");
 	}
 
 	if ((stats_mask & RTE_MTR_STATS_N_BYTES_RED) != 0) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_PARAMS,
-				NULL, "RTE_MTR_STATS_N_BYTES_RED not support");
+				NULL, "RTE_MTR_STATS_N_BYTES_RED not support.");
 	}
 
 	return 0;
@@ -623,7 +623,7 @@ nfp_mtr_validate(uint32_t meter_id,
 	if (params->use_prev_mtr_color != 0) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_PARAMS,
-				NULL, "Feature use_prev_mtr_color not support");
+				NULL, "Feature use_prev_mtr_color not support.");
 	}
 
 	return nfp_mtr_stats_mask_validate(params->stats_mask, error);
@@ -689,7 +689,7 @@ nfp_mtr_create(struct rte_eth_dev *dev,
 	if (mtr != NULL) {
 		return -rte_mtr_error_set(error, EEXIST,
 				RTE_MTR_ERROR_TYPE_MTR_ID,
-				NULL, "Meter already exist");
+				NULL, "Meter already exist.");
 	}
 
 	/* Check input meter params */
@@ -701,20 +701,20 @@ nfp_mtr_create(struct rte_eth_dev *dev,
 	if (mtr_profile == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
-				NULL, "Request meter profile not exist");
+				NULL, "Request meter profile not exist.");
 	}
 
 	if (mtr_profile->in_use) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
-				NULL, "Request meter profile is been used");
+				NULL, "Request meter profile is been used.");
 	}
 
 	mtr_policy = nfp_mtr_policy_search(priv, params->meter_policy_id);
 	if (mtr_policy == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
-				NULL, "Request meter policy not exist");
+				NULL, "Request meter policy not exist.");
 	}
 
 	/* Meter param memory alloc */
@@ -722,7 +722,7 @@ nfp_mtr_create(struct rte_eth_dev *dev,
 	if (mtr == NULL) {
 		return -rte_mtr_error_set(error, ENOMEM,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
-				NULL, "Meter param alloc failed");
+				NULL, "Meter param alloc failed.");
 	}
 
 	nfp_mtr_config(mtr_id, shared, params, mtr_profile, mtr_policy, mtr);
@@ -767,13 +767,13 @@ nfp_mtr_destroy(struct rte_eth_dev *dev,
 	if (mtr == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_ID,
-				NULL, "Request meter not exist");
+				NULL, "Request meter not exist.");
 	}
 
 	if (mtr->ref_cnt > 0) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_ID,
-				NULL, "Meter object is being used");
+				NULL, "Meter object is being used.");
 	}
 
 	/* Update profile/policy status */
@@ -817,7 +817,7 @@ nfp_mtr_enable(struct rte_eth_dev *dev,
 	if (mtr == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_ID,
-				NULL, "Request meter not exist");
+				NULL, "Request meter not exist.");
 	}
 
 	mtr->enable = true;
@@ -855,13 +855,13 @@ nfp_mtr_disable(struct rte_eth_dev *dev,
 	if (mtr == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_ID,
-				NULL, "Request meter not exist");
+				NULL, "Request meter not exist.");
 	}
 
 	if (mtr->ref_cnt > 0) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_ID,
-				NULL, "Can not disable a used meter");
+				NULL, "Can not disable a used meter.");
 	}
 
 	mtr->enable = false;
@@ -903,13 +903,13 @@ nfp_mtr_profile_update(struct rte_eth_dev *dev,
 	if (mtr == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_ID,
-				NULL, "Request meter not exist");
+				NULL, "Request meter not exist.");
 	}
 
 	if (mtr->ref_cnt > 0) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_ID,
-				NULL, "Request meter is been used");
+				NULL, "Request meter is been used.");
 	}
 
 	if (mtr->mtr_profile->profile_id == mtr_profile_id)
@@ -919,13 +919,13 @@ nfp_mtr_profile_update(struct rte_eth_dev *dev,
 	if (mtr_profile == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
-				NULL, "Request meter profile not exist");
+				NULL, "Request meter profile not exist.");
 	}
 
 	if (mtr_profile->in_use) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
-				NULL, "Request meter profile is been used");
+				NULL, "Request meter profile is been used.");
 	}
 
 	mtr_profile->in_use = true;
@@ -969,7 +969,7 @@ nfp_mtr_stats_update(struct rte_eth_dev *dev,
 	if (mtr == NULL) {
 		return -rte_mtr_error_set(error, EEXIST,
 				RTE_MTR_ERROR_TYPE_MTR_ID,
-				NULL, "Request meter id not exist");
+				NULL, "Request meter id not exist.");
 	}
 
 	ret = nfp_mtr_stats_mask_validate(stats_mask, error);
@@ -1022,7 +1022,7 @@ nfp_mtr_stats_read(struct rte_eth_dev *dev,
 	if (mtr == NULL) {
 		return -rte_mtr_error_set(error, EINVAL,
 				RTE_MTR_ERROR_TYPE_MTR_ID,
-				NULL, "Request meter not exist");
+				NULL, "Request meter not exist.");
 	}
 
 	*stats_mask = mtr->stats_mask;
@@ -1067,7 +1067,7 @@ int
 nfp_net_mtr_ops_get(struct rte_eth_dev *dev, void *arg)
 {
 	if (!rte_eth_dev_is_repr(dev)) {
-		PMD_DRV_LOG(ERR, "Port is not a representor");
+		PMD_DRV_LOG(ERR, "Port is not a representor.");
 		return -EINVAL;
 	}
 
@@ -1097,7 +1097,7 @@ nfp_mtr_priv_init(struct nfp_pf_dev *pf_dev)
 
 	priv = rte_zmalloc("nfp_app_mtr_priv", sizeof(struct nfp_mtr_priv), 0);
 	if (priv == NULL) {
-		PMD_INIT_LOG(ERR, "NFP app mtr priv creation failed");
+		PMD_INIT_LOG(ERR, "NFP app mtr priv creation failed.");
 		return -ENOMEM;
 	}
 
diff --git a/drivers/net/nfp/nfp_net_cmsg.c b/drivers/net/nfp/nfp_net_cmsg.c
index f2f694be0b..8f77c5588a 100644
--- a/drivers/net/nfp/nfp_net_cmsg.c
+++ b/drivers/net/nfp/nfp_net_cmsg.c
@@ -45,19 +45,19 @@ nfp_net_cmsg_xmit(struct nfp_net_hw *hw,
 	case NFP_NET_CFG_MBOX_RET_FS_OK:
 		break;
 	case NFP_NET_CFG_MBOX_RET_FS_ERR_NO_SPACE:
-		PMD_DRV_LOG(ERR, "Not enough space for cmd %u", cmsg->cmd);
+		PMD_DRV_LOG(ERR, "Not enough space for cmd %u.", cmsg->cmd);
 		ret = -ENOSPC;
 		break;
 	case NFP_NET_CFG_MBOX_RET_FS_ERR_MASK_FULL:
-		PMD_DRV_LOG(ERR, "The mask table is full for cmd %u", cmsg->cmd);
+		PMD_DRV_LOG(ERR, "The mask table is full for cmd %u.", cmsg->cmd);
 		ret = -EXFULL;
 		break;
 	case NFP_NET_CFG_MBOX_RET_FS_ERR_CMD_INVALID:
-		PMD_DRV_LOG(ERR, "The mbox cmd %u invalid", cmsg->cmd);
+		PMD_DRV_LOG(ERR, "The mbox cmd %u invalid.", cmsg->cmd);
 		ret = -EINVAL;
 		break;
 	default:
-		PMD_DRV_LOG(ERR, "Unrecognized mbox cmd %u", cmsg->cmd);
+		PMD_DRV_LOG(ERR, "Unrecognized mbox cmd %u.", cmsg->cmd);
 		ret = -EINVAL;
 		break;
 	}
diff --git a/drivers/net/nfp/nfp_net_common.c b/drivers/net/nfp/nfp_net_common.c
index 7a37b9c2aa..1a04916068 100644
--- a/drivers/net/nfp/nfp_net_common.c
+++ b/drivers/net/nfp/nfp_net_common.c
@@ -288,7 +288,7 @@ nfp_net_mbox_reconfig(struct nfp_net_hw *net_hw,
 	rte_spinlock_unlock(&net_hw->super.reconfig_lock);
 
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Error nft net mailbox reconfig: mbox=%#08x update=%#08x",
+		PMD_DRV_LOG(ERR, "Error nft net mailbox reconfig: mbox=%#08x update=%#08x.",
 				mbox_cmd, NFP_NET_CFG_UPDATE_MBOX);
 		return -EIO;
 	}
@@ -359,20 +359,20 @@ nfp_net_configure(struct rte_eth_dev *dev)
 
 	/* Checking TX mode */
 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
-		PMD_DRV_LOG(ERR, "TX mq_mode DCB and VMDq not supported");
+		PMD_DRV_LOG(ERR, "TX mq_mode DCB and VMDq not supported.");
 		return -EINVAL;
 	}
 
 	/* Checking RX mode */
 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0 &&
 			(hw->super.cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
-		PMD_DRV_LOG(ERR, "RSS not supported");
+		PMD_DRV_LOG(ERR, "RSS not supported.");
 		return -EINVAL;
 	}
 
 	/* Checking MTU set */
 	if (rxmode->mtu > hw->max_mtu + NFP_ETH_OVERHEAD) {
-		PMD_DRV_LOG(ERR, "MTU (%u) larger than the maximum possible frame size (%u)",
+		PMD_DRV_LOG(ERR, "MTU (%u) larger than the maximum possible frame size (%u).",
 				rxmode->mtu, hw->max_mtu + NFP_ETH_OVERHEAD);
 		return -ERANGE;
 	}
@@ -387,10 +387,10 @@ nfp_net_log_device_information(const struct nfp_net_hw *hw,
 	uint32_t cap = hw->super.cap;
 	uint32_t cap_ext = hw->super.cap_ext;
 
-	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
+	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d.",
 			pf_dev->ver.major, pf_dev->ver.minor, hw->max_mtu);
 
-	PMD_INIT_LOG(INFO, "CAP: %#x", cap);
+	PMD_INIT_LOG(INFO, "CAP: %#x.", cap);
 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
 			cap & NFP_NET_CFG_CTRL_ENABLE        ? "ENABLE "      : "",
 			cap & NFP_NET_CFG_CTRL_PROMISC       ? "PROMISC "     : "",
@@ -422,7 +422,7 @@ nfp_net_log_device_information(const struct nfp_net_hw *hw,
 			cap & NFP_NET_CFG_CTRL_LIVE_ADDR     ? "LIVE_ADDR "   : "",
 			cap & NFP_NET_CFG_CTRL_USO           ? "USO"          : "");
 
-	PMD_INIT_LOG(INFO, "CAP_WORD1: %#x", cap_ext);
+	PMD_INIT_LOG(INFO, "CAP_WORD1: %#x.", cap_ext);
 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s",
 			cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE        ? "PKT_TYPE "        : "",
 			cap_ext & NFP_NET_CFG_CTRL_IPSEC           ? "IPSEC "           : "",
@@ -432,7 +432,7 @@ nfp_net_log_device_information(const struct nfp_net_hw *hw,
 			cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER      ? "FLOW_STEER "      : "",
 			cap_ext & NFP_NET_CFG_CTRL_IN_ORDER        ? "VIRTIO_IN_ORDER " : "");
 
-	PMD_INIT_LOG(INFO, "The max_rx_queues: %u, max_tx_queues: %u",
+	PMD_INIT_LOG(INFO, "The max_rx_queues: %u, max_tx_queues: %u.",
 			hw->max_rx_queues, hw->max_tx_queues);
 }
 
@@ -493,12 +493,12 @@ nfp_net_set_mac_addr(struct rte_eth_dev *dev,
 	hw = &net_hw->super;
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) {
-		PMD_DRV_LOG(ERR, "MAC address unable to change when port enabled");
+		PMD_DRV_LOG(ERR, "MAC address unable to change when port enabled.");
 		return -EBUSY;
 	}
 
 	if (rte_is_valid_assigned_ether_addr(mac_addr) == 0) {
-		PMD_DRV_LOG(ERR, "Invalid MAC address");
+		PMD_DRV_LOG(ERR, "Invalid MAC address.");
 		return -EINVAL;
 	}
 
@@ -513,7 +513,7 @@ nfp_net_set_mac_addr(struct rte_eth_dev *dev,
 
 	/* Signal the NIC about the change */
 	if (nfp_reconfig(hw, new_ctrl, update) != 0) {
-		PMD_DRV_LOG(ERR, "MAC address update failed");
+		PMD_DRV_LOG(ERR, "MAC address update failed.");
 		return -EIO;
 	}
 
@@ -531,7 +531,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 
 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
 				dev->data->nb_rx_queues) != 0) {
-		PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues intr_vec",
+		PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues intr_vec.",
 				dev->data->nb_rx_queues);
 		return -ENOMEM;
 	}
@@ -539,13 +539,13 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 	hw = nfp_net_get_hw(dev);
 
 	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
-		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO");
+		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO.");
 		/* UIO just supports one queue and no LSC */
 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(0), 0);
 		if (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0)
 			return -1;
 	} else {
-		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with VFIO");
+		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with VFIO.");
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			/*
 			 * The first msix vector is reserved for non
@@ -645,12 +645,12 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
 
 	hw = &net_hw->super;
 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
-		PMD_DRV_LOG(ERR, "Promiscuous mode not supported");
+		PMD_DRV_LOG(ERR, "Promiscuous mode not supported.");
 		return -ENOTSUP;
 	}
 
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) != 0) {
-		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
+		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled.");
 		return 0;
 	}
 
@@ -679,12 +679,12 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)
 	hw = &net_hw->super;
 
 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
-		PMD_DRV_LOG(ERR, "Promiscuous mode not supported");
+		PMD_DRV_LOG(ERR, "Promiscuous mode not supported.");
 		return -ENOTSUP;
 	}
 
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
-		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
+		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled.");
 		return 0;
 	}
 
@@ -717,7 +717,7 @@ nfp_net_set_allmulticast_mode(struct rte_eth_dev *dev,
 
 	cap_extend = hw->cap_ext;
 	if ((cap_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0) {
-		PMD_DRV_LOG(DEBUG, "Allmulticast mode not supported");
+		PMD_DRV_LOG(DEBUG, "Allmulticast mode not supported.");
 		return -ENOTSUP;
 	}
 
@@ -834,9 +834,9 @@ nfp_net_link_update_common(struct rte_eth_dev *dev,
 	ret = rte_eth_linkstatus_set(dev, link);
 	if (ret == 0) {
 		if (link->link_status == RTE_ETH_LINK_UP)
-			PMD_DRV_LOG(INFO, "NIC Link is Up");
+			PMD_DRV_LOG(INFO, "NIC Link is Up.");
 		else
-			PMD_DRV_LOG(INFO, "NIC Link is Down");
+			PMD_DRV_LOG(INFO, "NIC Link is Down.");
 	}
 
 	return ret;
@@ -1065,7 +1065,7 @@ nfp_net_xstats_info(const struct rte_eth_dev *dev,
 		uint32_t index)
 {
 	if (index >= nfp_net_xstats_size(dev)) {
-		PMD_DRV_LOG(ERR, "The xstat index out of bounds");
+		PMD_DRV_LOG(ERR, "The xstat index out of bounds.");
 		return NULL;
 	}
 
@@ -1422,7 +1422,7 @@ nfp_net_common_init(struct nfp_pf_dev *pf_dev,
 	hw->max_tx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_TXRINGS);
 	if (hw->max_rx_queues == 0 || hw->max_tx_queues == 0) {
 		PMD_INIT_LOG(ERR, "Device %s can not be used, there are no valid queue "
-				"pairs for use", pci_dev->name);
+				"pairs for use.", pci_dev->name);
 		return -ENODEV;
 	}
 
@@ -1587,12 +1587,12 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 
 	rte_eth_linkstatus_get(dev, &link);
 	if (link.link_status != 0)
-		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s.",
 				dev->data->port_id, link.link_speed,
 				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				"full-duplex" : "half-duplex");
 	else
-		PMD_DRV_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
+		PMD_DRV_LOG(INFO, " Port %d: Link Down.", dev->data->port_id);
 
 	PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
 			pci_dev->addr.domain, pci_dev->addr.bus,
@@ -1674,7 +1674,7 @@ nfp_net_dev_interrupt_handler(void *param)
 	if (rte_eal_alarm_set(timeout * 1000,
 			nfp_net_dev_interrupt_delayed_handler,
 			(void *)dev) != 0) {
-		PMD_INIT_LOG(ERR, "Error setting alarm");
+		PMD_INIT_LOG(ERR, "Error setting alarm.");
 		/* Unmasking */
 		nfp_net_irq_unmask(dev);
 	}
@@ -1690,14 +1690,14 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
 
 	/* MTU setting is forbidden if port is started */
 	if (dev->data->dev_started) {
-		PMD_DRV_LOG(ERR, "Port %d must be stopped before configuration",
+		PMD_DRV_LOG(ERR, "Port %d must be stopped before configuration.",
 				dev->data->port_id);
 		return -EBUSY;
 	}
 
 	/* MTU larger than current mbufsize not supported */
 	if (mtu > hw->flbufsz) {
-		PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported",
+		PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported.",
 				mtu, hw->flbufsz);
 		return -ERANGE;
 	}
@@ -1777,7 +1777,7 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 
 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%hu)"
-				" does not match hardware can supported (%d)",
+				" does not match hardware can supported (%d).",
 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
 		return -EINVAL;
 	}
@@ -1869,7 +1869,7 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
 
 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%d)"
-				" does not match hardware can supported (%d)",
+				" does not match hardware can supported (%d).",
 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
 		return -EINVAL;
 	}
@@ -1979,7 +1979,7 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
 	/* Checking if RSS is enabled */
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
 		if (rss_hf != 0) {
-			PMD_DRV_LOG(ERR, "RSS unsupported");
+			PMD_DRV_LOG(ERR, "RSS unsupported.");
 			return -EINVAL;
 		}
 
@@ -1987,7 +1987,7 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
 	}
 
 	if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
-		PMD_DRV_LOG(ERR, "RSS hash key too long");
+		PMD_DRV_LOG(ERR, "RSS hash key too long.");
 		return -EINVAL;
 	}
 
@@ -2089,7 +2089,7 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev)
 
 	dev_conf = &dev->data->dev_conf;
 	if (dev_conf == NULL) {
-		PMD_DRV_LOG(ERR, "Wrong rss conf");
+		PMD_DRV_LOG(ERR, "Wrong rss conf.");
 		return -EINVAL;
 	}
 
@@ -2215,7 +2215,7 @@ nfp_net_txrwb_alloc(struct rte_eth_dev *eth_dev)
 			rte_socket_id(),
 			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
 	if (net_hw->txrwb_mz == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to alloc %s for TX ring write back",
+		PMD_INIT_LOG(ERR, "Failed to alloc %s for TX ring write back.",
 				mz_name);
 		return -ENOMEM;
 	}
@@ -2390,7 +2390,7 @@ nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version)
 
 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFDK) {
 		if (version.major < 5) {
-			PMD_INIT_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
+			PMD_INIT_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d.",
 					version.major);
 			return false;
 		}
@@ -2796,7 +2796,7 @@ nfp_net_sriov_update(struct nfp_net_hw *net_hw,
 	ret = nfp_net_vf_reconfig(net_hw, pf_dev, update, pf_dev->vf_base_id,
 			NFP_NET_VF_CFG_MB_VF_NUM);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Error nfp VF reconfig");
+		PMD_INIT_LOG(ERR, "Error nfp VF reconfig.");
 		return ret;
 	}
 
@@ -2814,11 +2814,11 @@ nfp_net_vf_queues_config(struct nfp_net_hw *net_hw,
 	ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG);
 	if (ret != 0) {
 		if (ret == -ENOTSUP) {
-			PMD_INIT_LOG(DEBUG, "Set VF max queue not supported");
+			PMD_INIT_LOG(DEBUG, "Set VF max queue not supported.");
 			return 0;
 		}
 
-		PMD_INIT_LOG(ERR, "Set VF max queue failed");
+		PMD_INIT_LOG(ERR, "Set VF max queue failed.");
 		return ret;
 	}
 
@@ -2827,7 +2827,7 @@ nfp_net_vf_queues_config(struct nfp_net_hw *net_hw,
 		ret = nfp_net_vf_reconfig(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_QUEUE_CONFIG,
 				pf_dev->queue_per_vf, pf_dev->vf_base_id + offset + i);
 		if (ret != 0) {
-			PMD_INIT_LOG(ERR, "Set VF max_queue failed");
+			PMD_INIT_LOG(ERR, "Set VF max_queue failed.");
 			return ret;
 		}
 	}
@@ -2844,11 +2844,11 @@ nfp_net_sriov_init(struct nfp_net_hw *net_hw,
 	ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_SPLIT);
 	if (ret != 0) {
 		if (ret == -ENOTSUP) {
-			PMD_INIT_LOG(DEBUG, "Set VF split not supported");
+			PMD_INIT_LOG(DEBUG, "Set VF split not supported.");
 			return 0;
 		}
 
-		PMD_INIT_LOG(ERR, "Set VF split failed");
+		PMD_INIT_LOG(ERR, "Set VF split failed.");
 		return ret;
 	}
 
@@ -2856,7 +2856,7 @@ nfp_net_sriov_init(struct nfp_net_hw *net_hw,
 
 	ret = nfp_net_sriov_update(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_SPLIT);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "The nfp sriov update spilt failed");
+		PMD_INIT_LOG(ERR, "The nfp sriov update spilt failed.");
 		return ret;
 	}
 
@@ -2874,13 +2874,13 @@ nfp_net_vf_config_app_init(struct nfp_net_hw *net_hw,
 
 	ret = nfp_net_sriov_init(net_hw, pf_dev);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Failed to init sriov module");
+		PMD_INIT_LOG(ERR, "Failed to init sriov module.");
 		return ret;
 	}
 
 	ret = nfp_net_vf_queues_config(net_hw, pf_dev);
 	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Failed to config vf queue");
+		PMD_INIT_LOG(ERR, "Failed to config vf queue.");
 		return ret;
 	}
 
diff --git a/drivers/net/nfp/nfp_net_ctrl.c b/drivers/net/nfp/nfp_net_ctrl.c
index b34d8f140f..cc56ff69e9 100644
--- a/drivers/net/nfp/nfp_net_ctrl.c
+++ b/drivers/net/nfp/nfp_net_ctrl.c
@@ -50,7 +50,7 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev)
 		offset = data - net_hw->super.ctrl_bar;
 
 		if (data + NFP_NET_CFG_TLV_VALUE > end) {
-			PMD_DRV_LOG(ERR, "Reached end of BAR without END TLV");
+			PMD_DRV_LOG(ERR, "Reached end of BAR without END TLV.");
 			return -EINVAL;
 		}
 
@@ -58,14 +58,14 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev)
 
 		length = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr);
 		if ((length & (NFP_NET_CFG_TLV_LENGTH_INC - 1)) != 0) {
-			PMD_DRV_LOG(ERR, "TLV size not multiple of 4B len: %u", length);
+			PMD_DRV_LOG(ERR, "TLV size not multiple of 4B len: %u.", length);
 			return -EINVAL;
 		}
 
 		/* Advance past the header */
 		data += NFP_NET_CFG_TLV_VALUE;
 		if (data + length > end) {
-			PMD_DRV_LOG(ERR, "Oversized TLV offset: %u len: %u",
+			PMD_DRV_LOG(ERR, "Oversized TLV offset: %u len: %u.",
 					offset, length);
 			return -EINVAL;
 		}
@@ -74,7 +74,7 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev)
 
 		switch (tlv_type) {
 		case NFP_NET_CFG_TLV_TYPE_UNKNOWN:
-			PMD_DRV_LOG(ERR, "Unknown TLV at offset: %u", offset);
+			PMD_DRV_LOG(ERR, "Unknown TLV at offset: %u.", offset);
 			return -EINVAL;
 		case NFP_NET_CFG_TLV_TYPE_RESERVED:
 			break;
@@ -82,7 +82,7 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev)
 			if (length == 0)
 				return 0;
 
-			PMD_DRV_LOG(ERR, "END TLV should be empty, has len: %u", length);
+			PMD_DRV_LOG(ERR, "END TLV should be empty, has len: %u.", length);
 			return -EINVAL;
 		case NFP_NET_CFG_TLV_TYPE_MBOX:
 			caps->mbox_len = length;
@@ -100,12 +100,12 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev)
 			if (FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr) == 0)
 				break;
 
-			PMD_DRV_LOG(ERR, "Unknown TLV type: %u offset: %u len: %u",
+			PMD_DRV_LOG(ERR, "Unknown TLV type: %u offset: %u len: %u.",
 					tlv_type, offset, length);
 			return -EINVAL;
 		}
 	}
 
-	PMD_DRV_LOG(ERR, "Reached end of BAR without END TLV");
+	PMD_DRV_LOG(ERR, "Reached end of BAR without END TLV.");
 	return -EINVAL;
 }
diff --git a/drivers/net/nfp/nfp_net_flow.c b/drivers/net/nfp/nfp_net_flow.c
index a5d1362001..7a68dc204f 100644
--- a/drivers/net/nfp/nfp_net_flow.c
+++ b/drivers/net/nfp/nfp_net_flow.c
@@ -194,23 +194,23 @@ nfp_net_flow_calculate_items(const struct rte_flow_item items[],
 	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
 		switch (item->type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_ETH detected");
+			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_ETH detected.");
 			*match_len = sizeof(struct nfp_net_cmsg_match_eth);
 			*item_type = RTE_FLOW_ITEM_TYPE_ETH;
 			ret = 0;
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
-			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV4 detected");
+			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV4 detected.");
 			*match_len = sizeof(struct nfp_net_cmsg_match_v4);
 			*item_type = RTE_FLOW_ITEM_TYPE_IPV4;
 			return 0;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
-			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV6 detected");
+			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV6 detected.");
 			*match_len = sizeof(struct nfp_net_cmsg_match_v6);
 			*item_type = RTE_FLOW_ITEM_TYPE_IPV6;
 			return 0;
 		default:
-			PMD_DRV_LOG(ERR, "Can not calculate match length");
+			PMD_DRV_LOG(ERR, "Can not calculate match length.");
 			*match_len = 0;
 			return -ENOTSUP;
 		}
@@ -525,7 +525,7 @@ nfp_net_flow_compile_items(const struct rte_flow_item items[],
 		}
 
 		if (proc == NULL) {
-			PMD_DRV_LOG(ERR, "No next item provided for %d", item->type);
+			PMD_DRV_LOG(ERR, "No next item provided for %d.", item->type);
 			ret = -ENOTSUP;
 			break;
 		}
@@ -533,20 +533,20 @@ nfp_net_flow_compile_items(const struct rte_flow_item items[],
 		/* Perform basic sanity checks */
 		ret = nfp_net_flow_item_check(item, proc);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "NFP flow item %d check failed", item->type);
+			PMD_DRV_LOG(ERR, "NFP flow item %d check failed.", item->type);
 			ret = -EINVAL;
 			break;
 		}
 
 		if (proc->merge == NULL) {
-			PMD_DRV_LOG(ERR, "NFP flow item %d no proc function", item->type);
+			PMD_DRV_LOG(ERR, "NFP flow item %d no proc function.", item->type);
 			ret = -ENOTSUP;
 			break;
 		}
 
 		ret = proc->merge(nfp_flow, item, proc);
 		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "NFP flow item %d exact merge failed", item->type);
+			PMD_DRV_LOG(ERR, "NFP flow item %d exact merge failed.", item->type);
 			break;
 		}
 
@@ -592,7 +592,7 @@ nfp_net_flow_action_queue(struct rte_eth_dev *dev,
 	queue = action->conf;
 	if (queue->index >= dev->data->nb_rx_queues ||
 			dev->data->rx_queues[queue->index] == NULL) {
-		PMD_DRV_LOG(ERR, "Queue index is illegal");
+		PMD_DRV_LOG(ERR, "Queue index is illegal.");
 		return -EINVAL;
 	}
 
@@ -613,19 +613,19 @@ nfp_net_flow_compile_actions(struct rte_eth_dev *dev,
 	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
 		switch (action->type) {
 		case RTE_FLOW_ACTION_TYPE_DROP:
-			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_DROP");
+			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_DROP.");
 			nfp_net_flow_action_drop(nfp_flow);
 			return 0;
 		case RTE_FLOW_ACTION_TYPE_MARK:
-			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_MARK");
+			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_MARK.");
 			nfp_net_flow_action_mark(nfp_flow, action);
 			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
-			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_QUEUE");
+			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_QUEUE.");
 			ret = nfp_net_flow_action_queue(dev, nfp_flow, action);
 			break;
 		default:
-			PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type);
+			PMD_DRV_LOG(ERR, "Unsupported action type: %d.", action->type);
 			return -ENOTSUP;
 		}
 	}
@@ -1091,7 +1091,7 @@ nfp_net_flow_priv_init(struct nfp_pf_dev *pf_dev,
 
 	priv = rte_zmalloc("nfp_app_nic_priv", sizeof(struct nfp_net_priv), 0);
 	if (priv == NULL) {
-		PMD_INIT_LOG(ERR, "NFP app nic priv creation failed");
+		PMD_INIT_LOG(ERR, "NFP app nic priv creation failed.");
 		ret = -ENOMEM;
 		goto exit;
 	}
@@ -1122,7 +1122,7 @@ nfp_net_flow_priv_init(struct nfp_pf_dev *pf_dev,
 	flow_hash_params.entries = priv->flow_limit * NFP_NET_HASH_REDUNDANCE;
 	priv->flow_table = rte_hash_create(&flow_hash_params);
 	if (priv->flow_table == NULL) {
-		PMD_INIT_LOG(ERR, "Flow hash table creation failed");
+		PMD_INIT_LOG(ERR, "Flow hash table creation failed.");
 		ret = -ENOMEM;
 		goto free_flow_position;
 	}
diff --git a/drivers/net/nfp/nfp_net_meta.c b/drivers/net/nfp/nfp_net_meta.c
index 5a67f87bee..70169eba6b 100644
--- a/drivers/net/nfp/nfp_net_meta.c
+++ b/drivers/net/nfp/nfp_net_meta.c
@@ -177,7 +177,7 @@ nfp_net_meta_parse_qinq(const struct nfp_net_meta_parsed *meta,
 		mb->vlan_tci = rte_cpu_to_le_16(meta->vlan[0].tci);
 
 	mb->vlan_tci_outer = rte_cpu_to_le_16(meta->vlan[1].tci);
-	PMD_RX_LOG(DEBUG, "Received outer vlan TCI is %u inner vlan TCI is %u",
+	PMD_RX_LOG(DEBUG, "Received outer vlan TCI is %u inner vlan TCI is %u.",
 			mb->vlan_tci_outer, mb->vlan_tci);
 	mb->ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
 }
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index d38f51b777..c7812a6dee 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -151,7 +151,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
 	uint64_t dma_addr;
 	struct nfp_net_dp_buf *rxe = rxq->rxbufs;
 
-	PMD_RX_LOG(DEBUG, "Fill Rx Freelist for %hu descriptors",
+	PMD_RX_LOG(DEBUG, "Fill Rx Freelist for %hu descriptors.",
 			rxq->rx_count);
 
 	for (i = 0; i < rxq->rx_count; i++) {
@@ -159,7 +159,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
 		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
 
 		if (mbuf == NULL) {
-			PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%hu",
+			PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%hu.",
 				rxq->qidx);
 			return -ENOMEM;
 		}
@@ -178,7 +178,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
 	rte_wmb();
 
 	/* Not advertising the whole ring as the firmware gets confused if so */
-	PMD_RX_LOG(DEBUG, "Increment FL write pointer in %hu", rxq->rx_count - 1);
+	PMD_RX_LOG(DEBUG, "Increment FL write pointer in %hu.", rxq->rx_count - 1);
 
 	nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
 
@@ -260,7 +260,7 @@ nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype,
 		mbuf_ptype |= RTE_PTYPE_L3_IPV6;
 		break;
 	default:
-		PMD_RX_LOG(DEBUG, "Unrecognized nfp outer layer 3 packet type: %u",
+		PMD_RX_LOG(DEBUG, "Unrecognized nfp outer layer 3 packet type: %u.",
 				nfp_ptype->outer_l3_ptype);
 		break;
 	}
@@ -278,7 +278,7 @@ nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype,
 		mbuf_ptype |= RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP;
 		break;
 	default:
-		PMD_RX_LOG(DEBUG, "Unrecognized nfp tunnel packet type: %u",
+		PMD_RX_LOG(DEBUG, "Unrecognized nfp tunnel packet type: %u.",
 				nfp_tunnel_ptype);
 		break;
 	}
@@ -305,7 +305,7 @@ nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype,
 		mbuf_ptype |= NFP_PTYPE2RTE(nfp_tunnel_ptype, L4_SCTP);
 		break;
 	default:
-		PMD_RX_LOG(DEBUG, "Unrecognized nfp layer 4 packet type: %u",
+		PMD_RX_LOG(DEBUG, "Unrecognized nfp layer 4 packet type: %u.",
 				nfp_ptype->l4_ptype);
 		break;
 	}
@@ -332,7 +332,7 @@ nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype,
 		mbuf_ptype |= NFP_PTYPE2RTE(nfp_tunnel_ptype, L3_IPV6_EXT_UNKNOWN);
 		break;
 	default:
-		PMD_RX_LOG(DEBUG, "Unrecognized nfp layer 3 packet type: %u",
+		PMD_RX_LOG(DEBUG, "Unrecognized nfp layer 3 packet type: %u.",
 				nfp_ptype->l3_ptype);
 		break;
 	}
@@ -426,7 +426,7 @@ nfp_net_recv_pkts(void *rx_queue,
 		 * DPDK just checks the queue is lower than max queues
 		 * enabled. But the queue needs to be configured.
 		 */
-		PMD_RX_LOG(ERR, "RX Bad queue");
+		PMD_RX_LOG(ERR, "RX Bad queue.");
 		return 0;
 	}
 
@@ -455,7 +455,7 @@ nfp_net_recv_pkts(void *rx_queue,
 		 */
 		new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
 		if (unlikely(new_mb == NULL)) {
-			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%hu",
+			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%hu.",
 					rxq->port_id, rxq->qidx);
 			nfp_net_mbuf_alloc_failed(rxq);
 			break;
@@ -468,7 +468,7 @@ nfp_net_recv_pkts(void *rx_queue,
 		mb = rxb->mbuf;
 		rxb->mbuf = new_mb;
 
-		PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
+		PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u.",
 				rxds->rxd.data_len, rxq->mbuf_size);
 
 		/* Size of this segment */
@@ -532,7 +532,7 @@ nfp_net_recv_pkts(void *rx_queue,
 	if (nb_hold == 0)
 		return nb_hold;
 
-	PMD_RX_LOG(DEBUG, "RX  port_id=%hu queue_id=%hu, %hu packets received",
+	PMD_RX_LOG(DEBUG, "RX  port_id=%hu queue_id=%hu, %hu packets received.",
 			rxq->port_id, rxq->qidx, avail);
 
 	nb_hold += rxq->nb_rx_hold;
@@ -543,7 +543,7 @@ nfp_net_recv_pkts(void *rx_queue,
 	 */
 	rte_wmb();
 	if (nb_hold > rxq->rx_free_thresh) {
-		PMD_RX_LOG(DEBUG, "The port=%hu queue=%hu nb_hold=%hu avail=%hu",
+		PMD_RX_LOG(DEBUG, "The port=%hu queue=%hu nb_hold=%hu avail=%hu.",
 				rxq->port_id, rxq->qidx, nb_hold, avail);
 		nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
 		nb_hold = 0;
@@ -630,7 +630,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc);
 	if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
 			nb_desc > max_rx_desc || nb_desc < min_rx_desc) {
-		PMD_DRV_LOG(ERR, "Wrong nb_desc value");
+		PMD_DRV_LOG(ERR, "Wrong nb_desc value.");
 		return -EINVAL;
 	}
 
@@ -678,7 +678,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 			sizeof(struct nfp_net_rx_desc) * max_rx_desc,
 			NFP_MEMZONE_ALIGN, socket_id);
 	if (tz == NULL) {
-		PMD_DRV_LOG(ERR, "Error allocating rx dma");
+		PMD_DRV_LOG(ERR, "Error allocating rx dma.");
 		nfp_net_rx_queue_release(dev, queue_idx);
 		dev->data->rx_queues[queue_idx] = NULL;
 		return -ENOMEM;
@@ -749,14 +749,14 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
 	uint32_t qcp_rd_p;
 
 	PMD_TX_LOG(DEBUG, "Queue %hu. Check for descriptor with a complete"
-			" status", txq->qidx);
+			" status.", txq->qidx);
 
 	/* Work out how many packets have been sent */
 	qcp_rd_p = nfp_net_read_tx_free_qcp(txq);
 
 	if (qcp_rd_p == txq->rd_p) {
 		PMD_TX_LOG(DEBUG, "Queue %hu: It seems harrier is not sending "
-				"packets (%u, %u)", txq->qidx,
+				"packets (%u, %u).", txq->qidx,
 				qcp_rd_p, txq->rd_p);
 		return 0;
 	}
@@ -766,7 +766,7 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
 	else
 		todo = qcp_rd_p + txq->tx_count - txq->rd_p;
 
-	PMD_TX_LOG(DEBUG, "The qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u",
+	PMD_TX_LOG(DEBUG, "The qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u.",
 			qcp_rd_p, txq->rd_p, txq->rd_p);
 
 	if (todo == 0)
diff --git a/drivers/net/nfp/nfp_rxtx_vec_avx2.c b/drivers/net/nfp/nfp_rxtx_vec_avx2.c
index e6f6f58221..66d003f64d 100644
--- a/drivers/net/nfp/nfp_rxtx_vec_avx2.c
+++ b/drivers/net/nfp/nfp_rxtx_vec_avx2.c
@@ -125,7 +125,7 @@ nfp_vec_avx2_recv1(struct nfp_net_rxq *rxq,
 {
 	/* Allocate a new mbuf into the software ring. */
 	if (rte_pktmbuf_alloc_bulk(rxq->mem_pool, rxb, 1) < 0) {
-		PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%hu",
+		PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%hu.",
 				rxq->port_id, rxq->qidx);
 		nfp_net_mbuf_alloc_failed(rxq);
 		return -ENOMEM;
@@ -146,7 +146,7 @@ nfp_vec_avx2_recv4(struct nfp_net_rxq *rxq,
 {
 	/* Allocate 4 new mbufs into the software ring. */
 	if (rte_pktmbuf_alloc_bulk(rxq->mem_pool, rxb, 4) < 0) {
-		PMD_RX_LOG(DEBUG, "RX mbuf bulk alloc failed port_id=%u queue_id=%hu",
+		PMD_RX_LOG(DEBUG, "RX mbuf bulk alloc failed port_id=%u queue_id=%hu.",
 				rxq->port_id, rxq->qidx);
 		return -ENOMEM;
 	}
@@ -188,7 +188,7 @@ nfp_net_vec_avx2_recv_pkts(void *rx_queue,
 	struct nfp_net_rxq *rxq = rx_queue;
 
 	if (unlikely(rxq == NULL)) {
-		PMD_RX_LOG(ERR, "RX Bad queue");
+		PMD_RX_LOG(ERR, "RX Bad queue.");
 		return 0;
 	}
 
@@ -262,7 +262,7 @@ nfp_net_vec_avx2_recv_pkts(void *rx_queue,
 	if (nb_hold == 0)
 		return nb_hold;
 
-	PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received",
+	PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received.",
 			rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
 
 	nb_hold += rxq->nb_rx_hold;
@@ -273,7 +273,7 @@ nfp_net_vec_avx2_recv_pkts(void *rx_queue,
 	 */
 	rte_wmb();
 	if (nb_hold > rxq->rx_free_thresh) {
-		PMD_RX_LOG(DEBUG, "The port=%hu queue=%hu nb_hold=%hu avail=%hu",
+		PMD_RX_LOG(DEBUG, "The port=%hu queue=%hu nb_hold=%hu avail=%hu.",
 				rxq->port_id, rxq->qidx, nb_hold, avail);
 		nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
 		nb_hold = 0;
diff --git a/drivers/net/nfp/nfp_service.c b/drivers/net/nfp/nfp_service.c
index 37e2187a3f..38ab7a39f5 100644
--- a/drivers/net/nfp/nfp_service.c
+++ b/drivers/net/nfp/nfp_service.c
@@ -27,14 +27,14 @@ nfp_service_enable(const struct rte_service_spec *service_spec,
 	/* Register the service */
 	ret = rte_service_component_register(service_spec, &info->id);
 	if (ret != 0) {
-		PMD_DRV_LOG(DEBUG, "Could not register %s", service_spec->name);
+		PMD_DRV_LOG(DEBUG, "Could not register %s.", service_spec->name);
 		return -EINVAL;
 	}
 
 	/* Set the NFP service runstate of a component. */
 	rte_service_component_runstate_set(info->id, 1);
 
-	PMD_DRV_LOG(DEBUG, "Enable service %s successfully", service_spec->name);
+	PMD_DRV_LOG(DEBUG, "Enable service %s successfully.", service_spec->name);
 
 	return 0;
 }
@@ -47,7 +47,7 @@ nfp_service_disable(struct nfp_service_info *info)
 
 	service_name = rte_service_get_name(info->id);
 	if (service_name == NULL) {
-		PMD_DRV_LOG(ERR, "Could not find service %u", info->id);
+		PMD_DRV_LOG(ERR, "Could not find service %u.", info->id);
 		return -EINVAL;
 	}
 
@@ -60,7 +60,7 @@ nfp_service_disable(struct nfp_service_info *info)
 	}
 
 	if (i == NFP_SERVICE_DISABLE_WAIT_COUNT)
-		PMD_DRV_LOG(ERR, "Could not stop service %s", service_name);
+		PMD_DRV_LOG(ERR, "Could not stop service %s.", service_name);
 
 	rte_service_component_unregister(info->id);
 
diff --git a/drivers/net/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/nfp/nfpcore/nfp6000_pcie.c
index 11cb45dd7d..4693577f4e 100644
--- a/drivers/net/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/nfp/nfpcore/nfp6000_pcie.c
@@ -622,7 +622,7 @@ nfp6000_area_acquire(struct nfp_cpp_area *area)
 
 	bar_num = nfp_alloc_bar(nfp, priv);
 	if (bar_num < 0) {
-		PMD_DRV_LOG(ERR, "Failed to allocate bar %d:%d:%d:%#lx: %d",
+		PMD_DRV_LOG(ERR, "Failed to allocate bar %d:%d:%d:%#lx: %d.",
 				priv->target, priv->action, priv->token,
 				priv->offset, bar_num);
 		return bar_num;
@@ -860,7 +860,7 @@ nfp6000_get_dsn(struct rte_pci_device *pci_dev,
 
 	pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
 	if (pos <= 0) {
-		PMD_DRV_LOG(ERR, "PCI_EXT_CAP_ID_DSN not found");
+		PMD_DRV_LOG(ERR, "PCI_EXT_CAP_ID_DSN not found.");
 		return -ENODEV;
 	}
 
@@ -868,7 +868,7 @@ nfp6000_get_dsn(struct rte_pci_device *pci_dev,
 	len = sizeof(tmp);
 
 	if (rte_pci_read_config(pci_dev, &tmp, len, pos) < 0) {
-		PMD_DRV_LOG(ERR, "NFP get device serial number failed");
+		PMD_DRV_LOG(ERR, "NFP get device serial number failed.");
 		return -ENOENT;
 	}
 
@@ -933,7 +933,7 @@ nfp6000_init(struct nfp_cpp *cpp)
 
 	ret = nfp_enable_bars(desc);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Enable bars failed");
+		PMD_DRV_LOG(ERR, "Enable bars failed.");
 		return -1;
 	}
 
@@ -1018,7 +1018,7 @@ nfp_cpp_from_nfp6000_pcie(struct rte_pci_device *pci_dev,
 
 	if (NFP_CPP_INTERFACE_CHANNEL_of(interface) !=
 			NFP_CPP_INTERFACE_CHANNEL_PEROPENER) {
-		PMD_DRV_LOG(ERR, "Interface channel is not right");
+		PMD_DRV_LOG(ERR, "Interface channel is not right.");
 		free(nfp);
 		return NULL;
 	}
@@ -1026,7 +1026,7 @@ nfp_cpp_from_nfp6000_pcie(struct rte_pci_device *pci_dev,
 	/* Probe for all the common NFP devices */
 	cpp = nfp_cpp_from_device_name(pci_dev, nfp, driver_lock_needed);
 	if (cpp == NULL) {
-		PMD_DRV_LOG(ERR, "Get cpp from operation failed");
+		PMD_DRV_LOG(ERR, "Get cpp from operation failed.");
 		free(nfp);
 		return NULL;
 	}
diff --git a/drivers/net/nfp/nfpcore/nfp_cppcore.c b/drivers/net/nfp/nfpcore/nfp_cppcore.c
index 06996b7bc8..dfc6d4613a 100644
--- a/drivers/net/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/nfp/nfpcore/nfp_cppcore.c
@@ -344,7 +344,7 @@ nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
 
 	err = cpp->op->area_init(area, target_id, target_addr, size);
 	if (err < 0) {
-		PMD_DRV_LOG(ERR, "Area init op failed");
+		PMD_DRV_LOG(ERR, "Area init op failed.");
 		free(area);
 		return NULL;
 	}
@@ -413,12 +413,12 @@ nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp,
 
 	area = nfp_cpp_area_alloc(cpp, destination, address, size);
 	if (area == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to allocate CPP area");
+		PMD_DRV_LOG(ERR, "Failed to allocate CPP area.");
 		return NULL;
 	}
 
 	if (nfp_cpp_area_acquire(area) != 0) {
-		PMD_DRV_LOG(ERR, "Failed to acquire CPP area");
+		PMD_DRV_LOG(ERR, "Failed to acquire CPP area.");
 		nfp_cpp_area_free(area);
 		return NULL;
 	}
@@ -469,7 +469,7 @@ nfp_cpp_area_acquire(struct nfp_cpp_area *area)
 	if (area->cpp->op->area_acquire != NULL) {
 		int err = area->cpp->op->area_acquire(area);
 		if (err < 0) {
-			PMD_DRV_LOG(ERR, "Area acquire op failed");
+			PMD_DRV_LOG(ERR, "Area acquire op failed.");
 			return -1;
 		}
 	}
@@ -950,14 +950,14 @@ nfp_cpp_alloc(struct rte_pci_device *pci_dev,
 	 */
 	err = cpp->op->init(cpp);
 	if (err < 0) {
-		PMD_DRV_LOG(ERR, "NFP interface initialization failed");
+		PMD_DRV_LOG(ERR, "NFP interface initialization failed.");
 		free(cpp);
 		return NULL;
 	}
 
 	err = nfp_cpp_model_autodetect(cpp, &cpp->model);
 	if (err < 0) {
-		PMD_DRV_LOG(ERR, "NFP model detection failed");
+		PMD_DRV_LOG(ERR, "NFP model detection failed.");
 		free(cpp);
 		return NULL;
 	}
@@ -967,7 +967,7 @@ nfp_cpp_alloc(struct rte_pci_device *pci_dev,
 		xpb_addr = 0x000a0000 + (target * 4);
 		err = nfp_xpb_readl(cpp, xpb_addr, &cpp->imb_cat_table[target]);
 		if (err < 0) {
-			PMD_DRV_LOG(ERR, "Can not read CPP mapping from device");
+			PMD_DRV_LOG(ERR, "Can not read CPP mapping from device.");
 			free(cpp);
 			return NULL;
 		}
@@ -975,7 +975,7 @@ nfp_cpp_alloc(struct rte_pci_device *pci_dev,
 
 	err = nfp_cpp_set_mu_locality_lsb(cpp);
 	if (err < 0) {
-		PMD_DRV_LOG(ERR, "Can not calculate MU locality bit offset");
+		PMD_DRV_LOG(ERR, "Can not calculate MU locality bit offset.");
 		free(cpp);
 		return NULL;
 	}
@@ -1050,7 +1050,7 @@ nfp_cpp_read(struct nfp_cpp *cpp,
 
 	area = nfp_cpp_area_alloc_acquire(cpp, destination, offset, length);
 	if (area == NULL) {
-		PMD_DRV_LOG(ERR, "Area allocation/acquire failed for read");
+		PMD_DRV_LOG(ERR, "Area allocation/acquire failed for read.");
 		return -EACCES;
 	}
 
@@ -1089,7 +1089,7 @@ nfp_cpp_write(struct nfp_cpp *cpp,
 
 	area = nfp_cpp_area_alloc_acquire(cpp, destination, offset, length);
 	if (area == NULL) {
-		PMD_DRV_LOG(ERR, "Area allocation/acquire failed for write");
+		PMD_DRV_LOG(ERR, "Area allocation/acquire failed for write.");
 		return -EACCES;
 	}
 
@@ -1155,7 +1155,7 @@ nfp_cpp_map_area(struct nfp_cpp *cpp,
 
 	*area = nfp_cpp_area_alloc_acquire(cpp, cpp_id, addr, size);
 	if (*area == NULL) {
-		PMD_DRV_LOG(ERR, "Area allocation/acquire failed for map");
+		PMD_DRV_LOG(ERR, "Area allocation/acquire failed for map.");
 		goto err_eio;
 	}
 
diff --git a/drivers/net/nfp/nfpcore/nfp_elf.c b/drivers/net/nfp/nfpcore/nfp_elf.c
index 766e0827f7..12a9da0fa0 100644
--- a/drivers/net/nfp/nfpcore/nfp_elf.c
+++ b/drivers/net/nfp/nfpcore/nfp_elf.c
@@ -627,7 +627,7 @@ nfp_elf_populate_fw_mip(struct nfp_elf *ectx,
 	first_entry = rte_le_to_cpu_32(mip->first_entry);
 
 	if (mip->signature != NFP_MIP_SIGNATURE) {
-		PMD_DRV_LOG(ERR, "Incorrect MIP signature %#08x",
+		PMD_DRV_LOG(ERR, "Incorrect MIP signature %#08x.",
 				rte_le_to_cpu_32(mip->signature));
 		return -EINVAL;
 	}
diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/nfp/nfpcore/nfp_hwinfo.c
index c334202bd7..5240de44fb 100644
--- a/drivers/net/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/nfp/nfpcore/nfp_hwinfo.c
@@ -110,12 +110,12 @@ nfp_hwinfo_db_walk(struct nfp_hwinfo *hwinfo,
 			key = val + strlen(val) + 1) {
 		val = key + strlen(key) + 1;
 		if (val >= end) {
-			PMD_DRV_LOG(ERR, "Bad HWINFO - overflowing value");
+			PMD_DRV_LOG(ERR, "Bad HWINFO - overflowing value.");
 			return -EINVAL;
 		}
 
 		if (val + strlen(val) + 1 > end) {
-			PMD_DRV_LOG(ERR, "Bad HWINFO - overflowing value");
+			PMD_DRV_LOG(ERR, "Bad HWINFO - overflowing value.");
 			return -EINVAL;
 		}
 	}
@@ -133,7 +133,7 @@ nfp_hwinfo_db_validate(struct nfp_hwinfo *db,
 
 	size = db->size;
 	if (size > len) {
-		PMD_DRV_LOG(ERR, "Unsupported hwinfo size %u > %u", size, len);
+		PMD_DRV_LOG(ERR, "Unsupported hwinfo size %u > %u.", size, len);
 		return -EINVAL;
 	}
 
@@ -141,7 +141,7 @@ nfp_hwinfo_db_validate(struct nfp_hwinfo *db,
 	new_crc = nfp_crc32_posix((char *)db, size);
 	crc = (uint32_t *)(db->start + size);
 	if (new_crc != *crc) {
-		PMD_DRV_LOG(ERR, "CRC mismatch, calculated %#x, expected %#x",
+		PMD_DRV_LOG(ERR, "CRC mismatch, calculated %#x, expected %#x.",
 				new_crc, *crc);
 		return -EINVAL;
 	}
@@ -162,7 +162,7 @@ nfp_hwinfo_try_fetch(struct nfp_cpp *cpp,
 
 	res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO);
 	if (res == NULL) {
-		PMD_DRV_LOG(ERR, "HWInfo - acquire resource failed");
+		PMD_DRV_LOG(ERR, "HWInfo - acquire resource failed.");
 		return NULL;
 	}
 
@@ -181,7 +181,7 @@ nfp_hwinfo_try_fetch(struct nfp_cpp *cpp,
 
 	err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
 	if (err != (int)*cpp_size) {
-		PMD_DRV_LOG(ERR, "HWInfo - CPP read error %d", err);
+		PMD_DRV_LOG(ERR, "HWInfo - CPP read error %d.", err);
 		goto exit_free;
 	}
 
@@ -190,7 +190,7 @@ nfp_hwinfo_try_fetch(struct nfp_cpp *cpp,
 		goto exit_free;
 
 	if (header->version != NFP_HWINFO_VERSION_2) {
-		PMD_DRV_LOG(ERR, "Unknown HWInfo version: %#08x",
+		PMD_DRV_LOG(ERR, "Unknown HWInfo version: %#08x.",
 				header->version);
 		goto exit_free;
 	}
@@ -223,7 +223,7 @@ nfp_hwinfo_fetch(struct nfp_cpp *cpp,
 
 		nanosleep(&wait, NULL);
 		if (count++ > 200) {    /* 10ms * 200 = 2s */
-			PMD_DRV_LOG(ERR, "NFP access error");
+			PMD_DRV_LOG(ERR, "NFP access error.");
 			return NULL;
 		}
 	}
diff --git a/drivers/net/nfp/nfpcore/nfp_mip.c b/drivers/net/nfp/nfpcore/nfp_mip.c
index 98d1d19047..16b94e6c10 100644
--- a/drivers/net/nfp/nfpcore/nfp_mip.c
+++ b/drivers/net/nfp/nfpcore/nfp_mip.c
@@ -21,18 +21,18 @@ nfp_mip_try_read(struct nfp_cpp *cpp,
 
 	ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip));
 	if (ret != sizeof(*mip)) {
-		PMD_DRV_LOG(ERR, "Failed to read MIP data");
+		PMD_DRV_LOG(ERR, "Failed to read MIP data.");
 		return -EIO;
 	}
 
 	if (mip->signature != NFP_MIP_SIGNATURE) {
-		PMD_DRV_LOG(ERR, "Incorrect MIP signature %#08x",
+		PMD_DRV_LOG(ERR, "Incorrect MIP signature %#08x.",
 				rte_le_to_cpu_32(mip->signature));
 		return -EINVAL;
 	}
 
 	if (mip->mip_version != NFP_MIP_VERSION) {
-		PMD_DRV_LOG(ERR, "Unsupported MIP version %d",
+		PMD_DRV_LOG(ERR, "Unsupported MIP version %d.",
 				rte_le_to_cpu_32(mip->mip_version));
 		return -EINVAL;
 	}
@@ -88,7 +88,7 @@ nfp_mip_open(struct nfp_cpp *cpp)
 
 	err = nfp_mip_read_resource(cpp, mip);
 	if (err != 0) {
-		PMD_DRV_LOG(ERR, "Failed to read MIP resource");
+		PMD_DRV_LOG(ERR, "Failed to read MIP resource.");
 		free(mip);
 		return NULL;
 	}
diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.c b/drivers/net/nfp/nfpcore/nfp_nffw.c
index 2f07fcd6c1..c808af2dab 100644
--- a/drivers/net/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/nfp/nfpcore/nfp_nffw.c
@@ -175,7 +175,7 @@ nfp_nffw_info_open(struct nfp_cpp *cpp)
 
 	state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
 	if (state->res == NULL) {
-		PMD_DRV_LOG(ERR, "NFFW - acquire resource failed");
+		PMD_DRV_LOG(ERR, "NFFW - acquire resource failed.");
 		goto err_free;
 	}
 
@@ -188,7 +188,7 @@ nfp_nffw_info_open(struct nfp_cpp *cpp)
 			nfp_resource_address(state->res),
 			fwinf, sizeof(*fwinf));
 	if (err < (int)sizeof(*fwinf)) {
-		PMD_DRV_LOG(ERR, "NFFW - CPP read error %d", err);
+		PMD_DRV_LOG(ERR, "NFFW - CPP read error %d.", err);
 		goto err_release;
 	}
 
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.c b/drivers/net/nfp/nfpcore/nfp_nsp.c
index a2f4f70081..3afbcffa42 100644
--- a/drivers/net/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.c
@@ -188,7 +188,7 @@ nfp_nsp_print_extended_error(uint32_t ret_val)
 
 	for (i = 0; i < RTE_DIM(nsp_errors); i++)
 		if (ret_val == nsp_errors[i].code)
-			PMD_DRV_LOG(ERR, "Err msg: %s", nsp_errors[i].msg);
+			PMD_DRV_LOG(ERR, "Err msg: %s.", nsp_errors[i].msg);
 }
 
 static int
@@ -205,12 +205,12 @@ nfp_nsp_check(struct nfp_nsp *state)
 
 	err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &reg);
 	if (err < 0) {
-		PMD_DRV_LOG(ERR, "NSP - CPP readq failed %d", err);
+		PMD_DRV_LOG(ERR, "NSP - CPP readq failed %d.", err);
 		return err;
 	}
 
 	if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) {
-		PMD_DRV_LOG(ERR, "Can not detect NFP Service Processor");
+		PMD_DRV_LOG(ERR, "Can not detect NFP Service Processor.");
 		return -ENODEV;
 	}
 
@@ -218,7 +218,7 @@ nfp_nsp_check(struct nfp_nsp *state)
 	state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg);
 
 	if (state->ver.major > NSP_MAJOR || state->ver.minor < NSP_MINOR) {
-		PMD_DRV_LOG(ERR, "Unsupported ABI %hu.%hu", state->ver.major,
+		PMD_DRV_LOG(ERR, "Unsupported ABI %hu.%hu.", state->ver.major,
 				state->ver.minor);
 		return -EINVAL;
 	}
@@ -246,7 +246,7 @@ nfp_nsp_open(struct nfp_cpp *cpp)
 
 	res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
 	if (res == NULL) {
-		PMD_DRV_LOG(ERR, "NSP - resource acquire failed");
+		PMD_DRV_LOG(ERR, "NSP - resource acquire failed.");
 		return NULL;
 	}
 
@@ -262,7 +262,7 @@ nfp_nsp_open(struct nfp_cpp *cpp)
 
 	err = nfp_nsp_check(state);
 	if (err != 0) {
-		PMD_DRV_LOG(DEBUG, "NSP - check failed");
+		PMD_DRV_LOG(DEBUG, "NSP - check failed.");
 		nfp_nsp_close(state);
 		return NULL;
 	}
@@ -313,7 +313,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp,
 	for (;;) {
 		err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg);
 		if (err < 0) {
-			PMD_DRV_LOG(ERR, "NSP - CPP readq failed");
+			PMD_DRV_LOG(ERR, "NSP - CPP readq failed.");
 			return err;
 		}
 
@@ -365,7 +365,7 @@ nfp_nsp_command_real(struct nfp_nsp *state,
 
 	err = nfp_nsp_check(state);
 	if (err != 0) {
-		PMD_DRV_LOG(ERR, "Check NSP command failed");
+		PMD_DRV_LOG(ERR, "Check NSP command failed.");
 		return err;
 	}
 
@@ -390,7 +390,7 @@ nfp_nsp_command_real(struct nfp_nsp *state,
 	err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command,
 			NSP_COMMAND_START, 0);
 	if (err != 0) {
-		PMD_DRV_LOG(ERR, "Error %d waiting for code %#04x to start",
+		PMD_DRV_LOG(ERR, "Error %d waiting for code %#04x to start.",
 				err, arg->code);
 		return err;
 	}
@@ -399,7 +399,7 @@ nfp_nsp_command_real(struct nfp_nsp *state,
 	err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status,
 			NSP_STATUS_BUSY, 0);
 	if (err != 0) {
-		PMD_DRV_LOG(ERR, "Error %d waiting for code %#04x to complete",
+		PMD_DRV_LOG(ERR, "Error %d waiting for code %#04x to complete.",
 				err, arg->code);
 		return err;
 	}
@@ -415,7 +415,7 @@ nfp_nsp_command_real(struct nfp_nsp *state,
 	err = FIELD_GET(NSP_STATUS_RESULT, reg);
 	if (err != 0) {
 		if (!arg->error_quiet)
-			PMD_DRV_LOG(ERR, "Result (error) code set: %d (%d) command: %d",
+			PMD_DRV_LOG(ERR, "Result (error) code set: %d (%d) command: %d.",
 					-err, (int)ret_val, arg->code);
 
 		if (arg->error_cb != 0)
@@ -477,7 +477,7 @@ nfp_nsp_command_buf_def(struct nfp_nsp *nsp,
 
 	if (!FIELD_FIT(NSP_BUFFER_CPP, cpp_id >> 8) ||
 			!FIELD_FIT(NSP_BUFFER_ADDRESS, cpp_buf)) {
-		PMD_DRV_LOG(ERR, "Buffer out of reach %#08x %#016lx",
+		PMD_DRV_LOG(ERR, "Buffer out of reach %#08x %#016lx.",
 				cpp_id, cpp_buf);
 		return -EINVAL;
 	}
@@ -487,7 +487,7 @@ nfp_nsp_command_buf_def(struct nfp_nsp *nsp,
 	ret = nfp_nsp_command_real(nsp, &arg->arg);
 	if (ret < 0) {
 		if (!arg->arg.error_quiet)
-			PMD_DRV_LOG(ERR, "NSP command failed");
+			PMD_DRV_LOG(ERR, "NSP command failed.");
 
 		return ret;
 	}
@@ -516,7 +516,7 @@ nfp_nsp_command_buf(struct nfp_nsp *nsp,
 	struct nfp_cpp *cpp = nsp->cpp;
 
 	if (nsp->ver.minor < 13) {
-		PMD_DRV_LOG(ERR, "NSP: Code %#04x with buffer not supported ABI %hu.%hu)",
+		PMD_DRV_LOG(ERR, "NSP: Code %#04x with buffer not supported ABI %hu.%hu).",
 				arg->arg.code, nsp->ver.major, nsp->ver.minor);
 		return -EOPNOTSUPP;
 	}
@@ -531,7 +531,7 @@ nfp_nsp_command_buf(struct nfp_nsp *nsp,
 	size = FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M +
 			FIELD_GET(NSP_DFLT_BUFFER_SIZE_4KB, reg) * SZ_4K;
 	if (size < max_size) {
-		PMD_DRV_LOG(ERR, "NSP: default buffer too small for command %#04x (%zu < %zu)",
+		PMD_DRV_LOG(ERR, "NSP: default buffer too small for command %#04x (%zu < %zu).",
 				arg->arg.code, size, max_size);
 		return -EINVAL;
 	}
@@ -563,7 +563,7 @@ nfp_nsp_wait(struct nfp_nsp *state)
 	}
 
 	if (err != 0)
-		PMD_DRV_LOG(ERR, "NSP failed to respond %d", err);
+		PMD_DRV_LOG(ERR, "NSP failed to respond %d.", err);
 
 	return err;
 }
@@ -616,9 +616,9 @@ nfp_nsp_load_fw_extended_msg(struct nfp_nsp *state,
 		return;
 
 	if (major >= RTE_DIM(major_msg))
-		PMD_DRV_LOG(INFO, "FW loading status: %x", ret_val);
+		PMD_DRV_LOG(INFO, "FW loading status: %x.", ret_val);
 	else if (minor >= RTE_DIM(minor_msg))
-		PMD_DRV_LOG(INFO, "%s, reason code: %d", major_msg[major], minor);
+		PMD_DRV_LOG(INFO, "%s, reason code: %d.", major_msg[major], minor);
 	else
 		PMD_DRV_LOG(INFO, "%s%c %s", major_msg[major],
 				minor != 0 ? ',' : '.', minor_msg[minor]);
@@ -818,7 +818,7 @@ nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state,
 	size_t min_size;
 
 	if (strnlen(default_val, size) == size) {
-		PMD_DRV_LOG(ERR, "NSP HWinfo default value not NULL terminated");
+		PMD_DRV_LOG(ERR, "NSP HWinfo default value not NULL terminated.");
 		return -EINVAL;
 	}
 
@@ -831,12 +831,12 @@ nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state,
 		if (ret == -ENOENT)
 			goto default_return;
 
-		PMD_DRV_LOG(ERR, "NSP HWinfo lookup failed: %d", ret);
+		PMD_DRV_LOG(ERR, "NSP HWinfo lookup failed: %d.", ret);
 		return ret;
 	}
 
 	if (strnlen(buf, min_size) == min_size) {
-		PMD_DRV_LOG(ERR, "NSP HWinfo value not NULL terminated");
+		PMD_DRV_LOG(ERR, "NSP HWinfo value not NULL terminated.");
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
index e03d0db299..b1cce03e70 100644
--- a/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
@@ -36,7 +36,7 @@ nfp_nsp_identify(struct nfp_nsp *nsp)
 	memset(ni, 0, sizeof(*ni));
 	ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni));
 	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "Reading BSP version failed %d", ret);
+		PMD_DRV_LOG(ERR, "Reading BSP version failed %d.", ret);
 		goto exit_free;
 	}
 
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
index 22c9ee98a4..1fcd54656a 100644
--- a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
@@ -223,7 +223,7 @@ nfp_eth_calc_port_geometry(struct nfp_eth_table *table)
 
 			if (table->ports[i].label_subport ==
 					table->ports[j].label_subport)
-				PMD_DRV_LOG(DEBUG, "Port %d subport %d is a duplicate",
+				PMD_DRV_LOG(DEBUG, "Port %d subport %d is a duplicate.",
 						table->ports[i].label_port,
 						table->ports[i].label_subport);
 
@@ -267,7 +267,7 @@ nfp_eth_read_ports_real(struct nfp_nsp *nsp)
 	memset(entries, 0, NSP_ETH_TABLE_SIZE);
 	ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
 	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "Reading port table failed %d", ret);
+		PMD_DRV_LOG(ERR, "Reading port table failed %d.", ret);
 		goto err;
 	}
 
@@ -281,7 +281,7 @@ nfp_eth_read_ports_real(struct nfp_nsp *nsp)
 	 * above.
 	 */
 	if (ret != 0 && ret != cnt) {
-		PMD_DRV_LOG(ERR, "Table entry count (%d) unmatch entries present (%d)",
+		PMD_DRV_LOG(ERR, "Table entry count (%d) unmatch entries present (%d).",
 				ret, cnt);
 		goto err;
 	}
@@ -362,12 +362,12 @@ nfp_eth_config_start(struct nfp_cpp *cpp,
 
 	ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
 	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "Reading port table failed %d", ret);
+		PMD_DRV_LOG(ERR, "Reading port table failed %d.", ret);
 		goto err;
 	}
 
 	if ((entries[idx].port & NSP_ETH_PORT_LANES_MASK) == 0) {
-		PMD_DRV_LOG(ERR, "Trying to set port state on disabled port %d", idx);
+		PMD_DRV_LOG(ERR, "Trying to set port state on disabled port %d.", idx);
 		goto err;
 	}
 
@@ -536,7 +536,7 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp,
 	 * codes were initially not populated correctly.
 	 */
 	if (nfp_nsp_get_abi_ver_minor(nsp) < 17) {
-		PMD_DRV_LOG(ERR, "Set operations not supported, please update flash");
+		PMD_DRV_LOG(ERR, "Set operations not supported, please update flash.");
 		return -EOPNOTSUPP;
 	}
 
@@ -661,7 +661,7 @@ nfp_eth_set_speed(struct nfp_nsp *nsp,
 
 	rate = nfp_eth_speed2rate(speed);
 	if (rate == RATE_INVALID) {
-		PMD_DRV_LOG(ERR, "Could not find matching lane rate for speed %u", speed);
+		PMD_DRV_LOG(ERR, "Could not find matching lane rate for speed %u.", speed);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/nfp/nfpcore/nfp_resource.c b/drivers/net/nfp/nfpcore/nfp_resource.c
index b05144036a..6437a78852 100644
--- a/drivers/net/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/nfp/nfpcore/nfp_resource.c
@@ -69,7 +69,7 @@ nfp_cpp_resource_find(struct nfp_cpp *cpp,
 
 	/* Search for a matching entry */
 	if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8) == 0) {
-		PMD_DRV_LOG(ERR, "Grabbing device lock not supported");
+		PMD_DRV_LOG(ERR, "Grabbing device lock not supported.");
 		return -EOPNOTSUPP;
 	}
 
@@ -109,19 +109,19 @@ nfp_resource_try_acquire(struct nfp_cpp *cpp,
 	int err;
 
 	if (nfp_cpp_mutex_lock(dev_mutex) != 0) {
-		PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex lock failed");
+		PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex lock failed.");
 		return -EINVAL;
 	}
 
 	err = nfp_cpp_resource_find(cpp, res);
 	if (err != 0) {
-		PMD_DRV_LOG(ERR, "RESOURCE - CPP resource find failed");
+		PMD_DRV_LOG(ERR, "RESOURCE - CPP resource find failed.");
 		goto err_unlock_dev;
 	}
 
 	err = nfp_cpp_mutex_trylock(res->mutex);
 	if (err != 0) {
-		PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex trylock failed");
+		PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex trylock failed.");
 		goto err_res_mutex_free;
 	}
 
@@ -173,7 +173,7 @@ nfp_resource_acquire(struct nfp_cpp *cpp,
 	dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
 			NFP_RESOURCE_TBL_BASE, NFP_RESOURCE_TBL_KEY);
 	if (dev_mutex == NULL) {
-		PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex alloc failed");
+		PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex alloc failed.");
 		goto err_free;
 	}
 
@@ -185,12 +185,12 @@ nfp_resource_acquire(struct nfp_cpp *cpp,
 		if (err == 0)
 			break;
 		if (err != -EBUSY) {
-			PMD_DRV_LOG(ERR, "RESOURCE - try acquire failed");
+			PMD_DRV_LOG(ERR, "RESOURCE - try acquire failed.");
 			goto mutex_free;
 		}
 
 		if (count++ > 1000) {    /* 1ms * 1000 = 1s */
-			PMD_DRV_LOG(ERR, "Error: resource %s timed out", name);
+			PMD_DRV_LOG(ERR, "Error: resource %s timed out.", name);
 			goto mutex_free;
 		}
 
diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.c b/drivers/net/nfp/nfpcore/nfp_rtsym.c
index fecf3d7b68..9f0d17cd0a 100644
--- a/drivers/net/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/nfp/nfpcore/nfp_rtsym.c
@@ -272,7 +272,7 @@ nfp_rtsym_size(const struct nfp_rtsym *sym)
 {
 	switch (sym->type) {
 	case NFP_RTSYM_TYPE_NONE:
-		PMD_DRV_LOG(ERR, "The type of rtsym '%s' is NONE", sym->name);
+		PMD_DRV_LOG(ERR, "The type of rtsym '%s' is NONE.", sym->name);
 		return 0;
 	case NFP_RTSYM_TYPE_OBJECT:
 		/* FALLTHROUGH */
@@ -281,7 +281,7 @@ nfp_rtsym_size(const struct nfp_rtsym *sym)
 	case NFP_RTSYM_TYPE_ABS:
 		return sizeof(uint64_t);
 	default:
-		PMD_DRV_LOG(ERR, "Unknown RTSYM type %u", sym->type);
+		PMD_DRV_LOG(ERR, "Unknown RTSYM type %u.", sym->type);
 		return 0;
 	}
 }
@@ -296,7 +296,7 @@ nfp_rtsym_to_dest(struct nfp_cpp *cpp,
 		uint64_t *addr)
 {
 	if (sym->type != NFP_RTSYM_TYPE_OBJECT) {
-		PMD_DRV_LOG(ERR, "RTSYM '%s': direct access to non-object rtsym",
+		PMD_DRV_LOG(ERR, "RTSYM '%s': direct access to non-object rtsym.",
 				sym->name);
 		return -EINVAL;
 	}
@@ -314,7 +314,7 @@ nfp_rtsym_to_dest(struct nfp_cpp *cpp,
 		*cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, action, token,
 				sym->domain);
 	} else {
-		PMD_DRV_LOG(ERR, "RTSYM '%s': unhandled target encoding: %d",
+		PMD_DRV_LOG(ERR, "RTSYM '%s': unhandled target encoding: %d.",
 				sym->name, sym->target);
 		return -EINVAL;
 	}
@@ -338,7 +338,7 @@ nfp_rtsym_read_real(struct nfp_cpp *cpp,
 	uint64_t sym_size = nfp_rtsym_size(sym);
 
 	if (offset >= sym_size) {
-		PMD_DRV_LOG(ERR, "RTSYM '%s' read out of bounds", sym->name);
+		PMD_DRV_LOG(ERR, "RTSYM '%s' read out of bounds.", sym->name);
 		return -ENXIO;
 	}
 
@@ -387,7 +387,7 @@ nfp_rtsym_readl_real(struct nfp_cpp *cpp,
 	uint32_t cpp_id;
 
 	if (offset + 4 > nfp_rtsym_size(sym)) {
-		PMD_DRV_LOG(ERR, "RTSYM '%s': readl out of bounds", sym->name);
+		PMD_DRV_LOG(ERR, "RTSYM '%s': readl out of bounds.", sym->name);
 		return -ENXIO;
 	}
 
@@ -420,7 +420,7 @@ nfp_rtsym_readq_real(struct nfp_cpp *cpp,
 	uint32_t cpp_id;
 
 	if (offset + 8 > nfp_rtsym_size(sym)) {
-		PMD_DRV_LOG(ERR, "RTSYM '%s': readq out of bounds", sym->name);
+		PMD_DRV_LOG(ERR, "RTSYM '%s': readq out of bounds.", sym->name);
 		return -ENXIO;
 	}
 
@@ -461,7 +461,7 @@ nfp_rtsym_write_real(struct nfp_cpp *cpp,
 	uint64_t sym_size = nfp_rtsym_size(sym);
 
 	if (offset > sym_size) {
-		PMD_DRV_LOG(ERR, "RTSYM '%s' write out of bounds", sym->name);
+		PMD_DRV_LOG(ERR, "RTSYM '%s' write out of bounds.", sym->name);
 		return -ENXIO;
 	}
 
@@ -498,7 +498,7 @@ nfp_rtsym_writel_real(struct nfp_cpp *cpp,
 	uint32_t cpp_id;
 
 	if (offset + 4 > nfp_rtsym_size(sym)) {
-		PMD_DRV_LOG(ERR, "RTSYM '%s' write out of bounds", sym->name);
+		PMD_DRV_LOG(ERR, "RTSYM '%s' write out of bounds.", sym->name);
 		return -ENXIO;
 	}
 
@@ -531,7 +531,7 @@ nfp_rtsym_writeq_real(struct nfp_cpp *cpp,
 	uint32_t cpp_id;
 
 	if (offset + 8 > nfp_rtsym_size(sym)) {
-		PMD_DRV_LOG(ERR, "RTSYM '%s' write out of bounds", sym->name);
+		PMD_DRV_LOG(ERR, "RTSYM '%s' write out of bounds.", sym->name);
 		return -ENXIO;
 	}
 
@@ -593,7 +593,7 @@ nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl,
 		err = nfp_rtsym_readq(rtbl->cpp, sym, 0, &val);
 		break;
 	default:
-		PMD_DRV_LOG(ERR, "RTSYM '%s' unsupported size: %#lx",
+		PMD_DRV_LOG(ERR, "RTSYM '%s' unsupported size: %#lx.",
 				name, sym->size);
 		err = -EINVAL;
 		break;
@@ -648,7 +648,7 @@ nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl,
 		err = nfp_rtsym_writeq(rtbl->cpp, sym, 0, value);
 		break;
 	default:
-		PMD_DRV_LOG(ERR, "RTSYM '%s' unsupported size: %#lx",
+		PMD_DRV_LOG(ERR, "RTSYM '%s' unsupported size: %#lx.",
 				name, sym_size);
 		err = -EINVAL;
 		break;
@@ -672,26 +672,26 @@ nfp_rtsym_map_offset(struct nfp_rtsym_table *rtbl,
 
 	sym = nfp_rtsym_lookup(rtbl, name);
 	if (sym == NULL) {
-		PMD_DRV_LOG(ERR, "Symbol lookup fails for %s", name);
+		PMD_DRV_LOG(ERR, "Symbol lookup fails for %s.", name);
 		return NULL;
 	}
 
 	ret = nfp_rtsym_to_dest(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0,
 			&cpp_id, &addr);
 	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "RTSYM '%s': mapping failed", name);
+		PMD_DRV_LOG(ERR, "RTSYM '%s': mapping failed.", name);
 		return NULL;
 	}
 
 	if (sym->size < min_size) {
-		PMD_DRV_LOG(ERR, "Symbol %s too small (%" PRIu64 " < %u)", name,
+		PMD_DRV_LOG(ERR, "Symbol %s too small (%" PRIu64 " < %u).", name,
 				sym->size, min_size);
 		return NULL;
 	}
 
 	mem = nfp_cpp_map_area(rtbl->cpp, cpp_id, addr + offset, sym->size, area);
 	if (mem == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to map symbol %s", name);
+		PMD_DRV_LOG(ERR, "Failed to map symbol %s.", name);
 		return NULL;
 	}
 
@@ -741,13 +741,13 @@ nfp_rtsym_readl_indirect(struct nfp_rtsym_table *rtbl,
 
 	aux_sym = nfp_rtsym_lookup(rtbl, aux_name);
 	if (aux_sym == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to find symbol %s", aux_name);
+		PMD_DRV_LOG(ERR, "Failed to find symbol %s.", aux_name);
 		return -ENOENT;
 	}
 
 	sym = nfp_rtsym_lookup(rtbl, name);
 	if (sym == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to find symbol %s", name);
+		PMD_DRV_LOG(ERR, "Failed to find symbol %s.", name);
 		return -ENOENT;
 	}
 
@@ -791,13 +791,13 @@ nfp_rtsym_writel_indirect(struct nfp_rtsym_table *rtbl,
 
 	aux_sym = nfp_rtsym_lookup(rtbl, aux_name);
 	if (aux_sym == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to find symbol %s", aux_name);
+		PMD_DRV_LOG(ERR, "Failed to find symbol %s.", aux_name);
 		return -ENOENT;
 	}
 
 	sym = nfp_rtsym_lookup(rtbl, name);
 	if (sym == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to find symbol %s", name);
+		PMD_DRV_LOG(ERR, "Failed to find symbol %s.", name);
 		return -ENOENT;
 	}
 
diff --git a/drivers/net/nfp/nfpcore/nfp_sync.c b/drivers/net/nfp/nfpcore/nfp_sync.c
index 686cdf8eb1..1b594257c6 100644
--- a/drivers/net/nfp/nfpcore/nfp_sync.c
+++ b/drivers/net/nfp/nfpcore/nfp_sync.c
@@ -91,11 +91,11 @@ nfp_sync_free(struct nfp_sync *sync)
 	}
 
 	if (sync->process.avail != NFP_SYNC_ELEMENT_MAX)
-		PMD_DRV_LOG(ERR, "Sync process handle residue");
+		PMD_DRV_LOG(ERR, "Sync process handle residue.");
 
 	for (i = 0; i < NFP_SYNC_PCI_MAX; i++) {
 		if (sync->pci[i].avail != NFP_SYNC_ELEMENT_MAX)
-			PMD_DRV_LOG(ERR, "Sync %s pci handle residue",
+			PMD_DRV_LOG(ERR, "Sync %s pci handle residue.",
 					sync->pci[i].pci_name);
 	}
 
@@ -206,7 +206,7 @@ nfp_sync_process_inner_handle_alloc(struct nfp_sync *sync,
 
 	handle = nfp_sync_common_handle_alloc(&sync->process, magic, size);
 	if (handle == NULL)
-		PMD_DRV_LOG(ERR, "Process handle alloc failed");
+		PMD_DRV_LOG(ERR, "Process handle alloc failed.");
 
 	rte_spinlock_unlock(&sync->spinlock);
 
@@ -280,7 +280,7 @@ nfp_sync_pci_inner_handle_alloc(struct nfp_sync *sync,
 	handle = nfp_sync_common_handle_alloc(&sync->pci[pci_avail_id],
 			magic, size);
 	if (handle == NULL)
-		PMD_DRV_LOG(ERR, "PCI handle alloc failed");
+		PMD_DRV_LOG(ERR, "PCI handle alloc failed.");
 
 	rte_spinlock_unlock(&sync->spinlock);
 
diff --git a/drivers/vdpa/nfp/nfp_vdpa.c b/drivers/vdpa/nfp/nfp_vdpa.c
index 4fb6e93d1f..7f2f21ec6c 100644
--- a/drivers/vdpa/nfp/nfp_vdpa.c
+++ b/drivers/vdpa/nfp/nfp_vdpa.c
@@ -134,7 +134,7 @@ nfp_vdpa_vfio_setup(struct nfp_vdpa_dev *device)
 	if (device->vfio_group_fd < 0)
 		goto container_destroy;
 
-	DRV_VDPA_LOG(DEBUG, "The container_fd=%d, group_fd=%d,",
+	DRV_VDPA_LOG(DEBUG, "The container_fd=%d, group_fd=%d.",
 			device->vfio_container_fd, device->vfio_group_fd);
 
 	ret = rte_pci_map_device(pci_dev);
@@ -178,7 +178,7 @@ nfp_vdpa_dma_do_unmap(struct rte_vhost_memory *mem,
 				region->size);
 		if (ret < 0) {
 			/* Here should not return, even error happened. */
-			DRV_VDPA_LOG(ERR, "DMA unmap failed. Times: %u", i);
+			DRV_VDPA_LOG(ERR, "DMA unmap failed. Times: %u.", i);
 		}
 	}
 
@@ -225,7 +225,7 @@ nfp_vdpa_dma_map(struct nfp_vdpa_dev *device,
 	}
 
 	vfio_container_fd = device->vfio_container_fd;
-	DRV_VDPA_LOG(DEBUG, "The vfio_container_fd %d", vfio_container_fd);
+	DRV_VDPA_LOG(DEBUG, "The vfio_container_fd %d.", vfio_container_fd);
 
 	if (do_map)
 		ret = nfp_vdpa_dma_do_map(mem, mem->nregions, vfio_container_fd);
@@ -533,7 +533,7 @@ nfp_vdpa_enable_vfio_intr(struct nfp_vdpa_dev *device,
 		for (i = 0; i < nr_vring; i += 2) {
 			fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
 			if (fd < 0) {
-				DRV_VDPA_LOG(ERR, "Can't setup eventfd");
+				DRV_VDPA_LOG(ERR, "Can't setup eventfd.");
 				return -EINVAL;
 			}
 
@@ -587,7 +587,7 @@ nfp_vdpa_read_kickfd(int kickfd)
 
 		if (errno != EINTR && errno != EWOULDBLOCK &&
 				errno != EAGAIN) {
-			DRV_VDPA_LOG(ERR, "Error reading kickfd");
+			DRV_VDPA_LOG(ERR, "Error reading kickfd.");
 			break;
 		}
 	}
@@ -609,7 +609,7 @@ nfp_vdpa_notify_epoll_ctl(uint32_t queue_num,
 		ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
 		ret = epoll_ctl(device->epoll_fd, EPOLL_CTL_ADD, vring.kickfd, &ev);
 		if (ret < 0) {
-			DRV_VDPA_LOG(ERR, "Epoll add error for queue %d", qid);
+			DRV_VDPA_LOG(ERR, "Epoll add error for queue %d.", qid);
 			return ret;
 		}
 	}
@@ -633,7 +633,7 @@ nfp_vdpa_notify_epoll_wait(uint32_t queue_num,
 			if (errno == EINTR)
 				continue;
 
-			DRV_VDPA_LOG(ERR, "Epoll wait fail");
+			DRV_VDPA_LOG(ERR, "Epoll wait fail.");
 			return -EACCES;
 		}
 
@@ -794,7 +794,7 @@ nfp_vdpa_vring_epoll_ctl(uint32_t queue_num,
 		ev.data.u64 = qid << 1 | (uint64_t)vring.kickfd << 32;
 		ret = epoll_ctl(device->epoll_fd, EPOLL_CTL_ADD, vring.kickfd, &ev);
 		if (ret < 0) {
-			DRV_VDPA_LOG(ERR, "Epoll add error for queue %u", qid);
+			DRV_VDPA_LOG(ERR, "Epoll add error for queue %u.", qid);
 			return ret;
 		}
 	}
@@ -808,7 +808,7 @@ nfp_vdpa_vring_epoll_ctl(uint32_t queue_num,
 		ret = epoll_ctl(device->epoll_fd, EPOLL_CTL_ADD,
 				device->intr_fd[qid], &ev);
 		if (ret < 0) {
-			DRV_VDPA_LOG(ERR, "Epoll add error for queue %u", qid);
+			DRV_VDPA_LOG(ERR, "Epoll add error for queue %u.", qid);
 			return ret;
 		}
 
@@ -834,7 +834,7 @@ nfp_vdpa_vring_epoll_wait(uint32_t queue_num,
 			if (errno == EINTR)
 				continue;
 
-			DRV_VDPA_LOG(ERR, "Epoll wait fail");
+			DRV_VDPA_LOG(ERR, "Epoll wait fail.");
 			return -EACCES;
 		}
 
@@ -966,7 +966,7 @@ nfp_vdpa_dev_config(int vid)
 	vdev = rte_vhost_get_vdpa_device(vid);
 	node = nfp_vdpa_find_node_by_vdev(vdev);
 	if (node == NULL) {
-		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev);
+		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev);
 		return -ENODEV;
 	}
 
@@ -993,7 +993,7 @@ nfp_vdpa_dev_close(int vid)
 	vdev = rte_vhost_get_vdpa_device(vid);
 	node = nfp_vdpa_find_node_by_vdev(vdev);
 	if (node == NULL) {
-		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev);
+		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev);
 		return -ENODEV;
 	}
 
@@ -1032,7 +1032,7 @@ nfp_vdpa_get_vfio_group_fd(int vid)
 	vdev = rte_vhost_get_vdpa_device(vid);
 	node = nfp_vdpa_find_node_by_vdev(vdev);
 	if (node == NULL) {
-		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev);
+		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev);
 		return -ENODEV;
 	}
 
@@ -1048,7 +1048,7 @@ nfp_vdpa_get_vfio_device_fd(int vid)
 	vdev = rte_vhost_get_vdpa_device(vid);
 	node = nfp_vdpa_find_node_by_vdev(vdev);
 	if (node == NULL) {
-		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev);
+		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev);
 		return -ENODEV;
 	}
 
@@ -1099,7 +1099,7 @@ nfp_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
 
 	node = nfp_vdpa_find_node_by_vdev(vdev);
 	if (node == NULL) {
-		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev);
+		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev);
 		return -ENODEV;
 	}
 
@@ -1147,12 +1147,12 @@ nfp_vdpa_set_features(int32_t vid)
 	struct rte_vdpa_device *vdev;
 	struct nfp_vdpa_dev_node *node;
 
-	DRV_VDPA_LOG(DEBUG, "Start vid=%d", vid);
+	DRV_VDPA_LOG(DEBUG, "Start vid=%d.", vid);
 
 	vdev = rte_vhost_get_vdpa_device(vid);
 	node = nfp_vdpa_find_node_by_vdev(vdev);
 	if (node == NULL) {
-		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev);
+		DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev);
 		return -ENODEV;
 	}
 
@@ -1165,7 +1165,7 @@ nfp_vdpa_set_features(int32_t vid)
 	if (device->hw.sw_lm) {
 		ret = nfp_vdpa_sw_fallback(device);
 		if (ret != 0) {
-			DRV_VDPA_LOG(ERR, "Software fallback start failed");
+			DRV_VDPA_LOG(ERR, "Software fallback start failed.");
 			return -1;
 		}
 	}
@@ -1178,7 +1178,7 @@ nfp_vdpa_set_vring_state(int vid,
 		int vring,
 		int state)
 {
-	DRV_VDPA_LOG(DEBUG, "Start vid=%d, vring=%d, state=%d", vid, vring, state);
+	DRV_VDPA_LOG(DEBUG, "Start vid=%d, vring=%d, state=%d.", vid, vring, state);
 	return 0;
 }
 
@@ -1227,7 +1227,7 @@ nfp_vdpa_pci_probe(struct rte_pci_device *pci_dev)
 
 	device->vdev = rte_vdpa_register_device(&pci_dev->device, &nfp_vdpa_ops);
 	if (device->vdev == NULL) {
-		DRV_VDPA_LOG(ERR, "Failed to register device %s", pci_dev->name);
+		DRV_VDPA_LOG(ERR, "Failed to register device %s.", pci_dev->name);
 		goto vfio_teardown;
 	}
 
@@ -1263,7 +1263,7 @@ nfp_vdpa_pci_remove(struct rte_pci_device *pci_dev)
 
 	node = nfp_vdpa_find_node_by_pdev(pci_dev);
 	if (node == NULL) {
-		DRV_VDPA_LOG(ERR, "Invalid device: %s", pci_dev->name);
+		DRV_VDPA_LOG(ERR, "Invalid device: %s.", pci_dev->name);
 		return -ENODEV;
 	}
 
diff --git a/drivers/vdpa/nfp/nfp_vdpa_core.c b/drivers/vdpa/nfp/nfp_vdpa_core.c
index bb5375bb5f..b3076104a0 100644
--- a/drivers/vdpa/nfp/nfp_vdpa_core.c
+++ b/drivers/vdpa/nfp/nfp_vdpa_core.c
@@ -80,7 +80,7 @@ nfp_vdpa_hw_init(struct nfp_vdpa_hw *vdpa_hw,
 		notify_base += NFP_VDPA_NOTIFY_ADDR_INTERVAL;
 
 		vdpa_hw->notify_region = queue;
-		DRV_CORE_LOG(DEBUG, "The notify_addr[%d] at %p, notify_addr[%d] at %p",
+		DRV_CORE_LOG(DEBUG, "The notify_addr[%d] at %p, notify_addr[%d] at %p.",
 				idx, vdpa_hw->notify_addr[idx],
 				idx + 1, vdpa_hw->notify_addr[idx + 1]);
 	}
-- 
2.39.1


^ permalink raw reply	[relevance 3%]

* [RFC v3 00/10] eventdev: remove single-event enqueue and dequeue
  @ 2024-10-17  6:38  3% ` Mattias Rönnblom
  2024-10-17  6:38 11%   ` [RFC v3 10/10] eventdev: remove single event " Mattias Rönnblom
                     ` (3 more replies)
  0 siblings, 4 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-17  6:38 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: dev, Mattias Rönnblom, David Marchand, Stephen Hemminger,
	Anoob Joseph, Hemant Agrawal, Sachin Saxena, Abdullah Sevincer,
	Pavan Nikhilesh, Shijith Thotton, Harry van Haaren,
	Mattias Rönnblom

Remove the single-event enqueue and dequeue functions from the
eventdev "ops" struct, to reduce complexity, leaving performance
unaffected.

This ABI change has been announced as a DPDK deprecation notice,
originally scheduled for DPDK 23.11.

Mattias Rönnblom (9):
  event/dsw: remove single event enqueue and dequeue
  event/dlb2: remove single event enqueue and dequeue
  event/octeontx: remove single event enqueue and dequeue
  event/sw: remove single event enqueue and dequeue
  event/dpaa: remove single event enqueue and dequeue
  event/dpaa2: remove single event enqueue and dequeue
  event/opdl: remove single event enqueue and dequeue
  event/skeleton: remove single event enqueue and dequeue
  eventdev: remove single event enqueue and dequeue

Pavan Nikhilesh (1):
  event/cnxk: remove single event enqueue and dequeue

 doc/guides/rel_notes/deprecation.rst       |  6 +-
 doc/guides/rel_notes/release_24_11.rst     |  3 +
 drivers/event/cnxk/cn10k_eventdev.c        | 74 ++--------------------
 drivers/event/cnxk/cn10k_worker.c          | 49 +++++++-------
 drivers/event/cnxk/cn10k_worker.h          |  1 -
 drivers/event/cnxk/cn9k_eventdev.c         | 73 +--------------------
 drivers/event/cnxk/cn9k_worker.c           | 26 +++-----
 drivers/event/cnxk/cn9k_worker.h           |  3 -
 drivers/event/dlb2/dlb2.c                  | 40 +-----------
 drivers/event/dpaa/dpaa_eventdev.c         | 27 +-------
 drivers/event/dpaa2/dpaa2_eventdev.c       | 15 -----
 drivers/event/dsw/dsw_evdev.c              |  2 -
 drivers/event/dsw/dsw_evdev.h              |  2 -
 drivers/event/dsw/dsw_event.c              | 12 ----
 drivers/event/octeontx/ssovf_evdev.h       |  1 -
 drivers/event/octeontx/ssovf_worker.c      | 40 ++----------
 drivers/event/opdl/opdl_evdev.c            |  2 -
 drivers/event/skeleton/skeleton_eventdev.c | 29 ---------
 drivers/event/sw/sw_evdev.c                |  2 -
 drivers/event/sw/sw_evdev.h                |  2 -
 drivers/event/sw/sw_evdev_worker.c         | 12 ----
 lib/eventdev/eventdev_pmd.h                |  4 --
 lib/eventdev/eventdev_private.c            | 22 -------
 lib/eventdev/rte_eventdev.h                | 21 ++----
 lib/eventdev/rte_eventdev_core.h           | 11 ----
 25 files changed, 52 insertions(+), 427 deletions(-)

-- 
2.43.0


^ permalink raw reply	[relevance 3%]

* [RFC v3 10/10] eventdev: remove single event enqueue and dequeue
  2024-10-17  6:38  3% ` [RFC v3 00/10] eventdev: remove single-event " Mattias Rönnblom
@ 2024-10-17  6:38 11%   ` Mattias Rönnblom
  2024-10-21  7:25  0%   ` [RFC v3 00/10] eventdev: remove single-event " Jerin Jacob
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-17  6:38 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: dev, Mattias Rönnblom, David Marchand, Stephen Hemminger,
	Anoob Joseph, Hemant Agrawal, Sachin Saxena, Abdullah Sevincer,
	Pavan Nikhilesh, Shijith Thotton, Harry van Haaren,
	Mattias Rönnblom

Remove the single event enqueue and dequeue, since they did not
provide any noticeable performance benefits.

This is a change of the ABI, previously announced as a deprecation
notice. These functions were not directly invoked by the application,
so the API remains unaffected.
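
For illustration, a single-event enqueue through the public API looks
the same before and after this change; only the internal dispatch
differs. A minimal sketch (assuming a configured event device and
port, and some pre-allocated object "obj"):

	struct rte_event ev = {
		.queue_id = 0,
		.op = RTE_EVENT_OP_NEW,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.event_type = RTE_EVENT_TYPE_CPU,
		.event_ptr = obj,
	};

	/* Previously, nb_events == 1 was special-cased to the per-PMD
	 * single-event op; now the same call always goes through the
	 * burst op with nb_events == 1.
	 */
	if (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1) {
		/* back-pressure: retry or drop */
	}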

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--

RFC v3:
 * Update release notes. (Jerin Jacob)
 * Remove single-event enqueue and dequeue function typedefs.
   (Pavan Nikhilesh)
---
 doc/guides/rel_notes/deprecation.rst   |  6 +-----
 doc/guides/rel_notes/release_24_11.rst |  3 +++
 lib/eventdev/eventdev_pmd.h            |  4 ----
 lib/eventdev/eventdev_private.c        | 22 ----------------------
 lib/eventdev/rte_eventdev.h            | 21 ++++-----------------
 lib/eventdev/rte_eventdev_core.h       | 11 -----------
 6 files changed, 8 insertions(+), 59 deletions(-)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 20fcfedb7b..f501923fb5 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -173,11 +173,7 @@ Deprecation Notices
 
 * eventdev: The single-event (non-burst) enqueue and dequeue operations,
   used by static inline burst enqueue and dequeue functions in ``rte_eventdev.h``,
-  will be removed in DPDK 23.11.
-  This simplification includes changing the layout and potentially also
-  the size of the public ``rte_event_fp_ops`` struct, breaking the ABI.
-  Since these functions are not called directly by the application,
-  the API remains unaffected.
+  are removed in DPDK 24.11.
 
 * pipeline: The pipeline library legacy API (functions rte_pipeline_*)
   will be deprecated and subsequently removed in DPDK 24.11 release.
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index acc512c70a..d356e1edc5 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -323,6 +323,9 @@ ABI Changes
 
 * eventdev: Added ``preschedule_type`` field to ``rte_event_dev_config`` structure.
 
+* eventdev: The PMD single-event enqueue and dequeue function pointers are removed
+  from ``rte_event_fp_ops``.
+
 * graph: To accommodate node specific xstats counters, added ``xstat_cntrs``,
   ``xstat_desc`` and ``xstat_count`` to ``rte_graph_cluster_node_stats``,
   added new structure ``rte_node_xstats`` to ``rte_node_register`` and
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index af855e3467..36148f8d86 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -158,16 +158,12 @@ struct __rte_cache_aligned rte_eventdev {
 	uint8_t attached : 1;
 	/**< Flag indicating the device is attached */
 
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
 	event_enqueue_burst_t enqueue_burst;
 	/**< Pointer to PMD enqueue burst function. */
 	event_enqueue_burst_t enqueue_new_burst;
 	/**< Pointer to PMD enqueue burst function(op new variant) */
 	event_enqueue_burst_t enqueue_forward_burst;
 	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< Pointer to PMD dequeue burst function. */
 	event_maintain_t maintain;
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
index b628f4a69e..6df129fc2d 100644
--- a/lib/eventdev/eventdev_private.c
+++ b/lib/eventdev/eventdev_private.c
@@ -5,15 +5,6 @@
 #include "eventdev_pmd.h"
 #include "rte_eventdev.h"
 
-static uint16_t
-dummy_event_enqueue(__rte_unused void *port,
-		    __rte_unused const struct rte_event *ev)
-{
-	RTE_EDEV_LOG_ERR(
-		"event enqueue requested for unconfigured event device");
-	return 0;
-}
-
 static uint16_t
 dummy_event_enqueue_burst(__rte_unused void *port,
 			  __rte_unused const struct rte_event ev[],
@@ -24,15 +15,6 @@ dummy_event_enqueue_burst(__rte_unused void *port,
 	return 0;
 }
 
-static uint16_t
-dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
-		    __rte_unused uint64_t timeout_ticks)
-{
-	RTE_EDEV_LOG_ERR(
-		"event dequeue requested for unconfigured event device");
-	return 0;
-}
-
 static uint16_t
 dummy_event_dequeue_burst(__rte_unused void *port,
 			  __rte_unused struct rte_event ev[],
@@ -129,11 +111,9 @@ event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
 {
 	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
 	static const struct rte_event_fp_ops dummy = {
-		.enqueue = dummy_event_enqueue,
 		.enqueue_burst = dummy_event_enqueue_burst,
 		.enqueue_new_burst = dummy_event_enqueue_burst,
 		.enqueue_forward_burst = dummy_event_enqueue_burst,
-		.dequeue = dummy_event_dequeue,
 		.dequeue_burst = dummy_event_dequeue_burst,
 		.maintain = dummy_event_maintain,
 		.txa_enqueue = dummy_event_tx_adapter_enqueue,
@@ -153,11 +133,9 @@ void
 event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
 		     const struct rte_eventdev *dev)
 {
-	fp_op->enqueue = dev->enqueue;
 	fp_op->enqueue_burst = dev->enqueue_burst;
 	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
 	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
-	fp_op->dequeue = dev->dequeue;
 	fp_op->dequeue_burst = dev->dequeue_burst;
 	fp_op->maintain = dev->maintain;
 	fp_op->txa_enqueue = dev->txa_enqueue;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index b5c3c16dd0..fabd1490db 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -2596,14 +2596,8 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 	}
 #endif
 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (fp_ops->enqueue)(port, ev);
-	else
-		return fn(port, ev, nb_events);
+
+	return fn(port, ev, nb_events);
 }
 
 /**
@@ -2852,15 +2846,8 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 	}
 #endif
 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (fp_ops->dequeue)(port, ev, timeout_ticks);
-	else
-		return (fp_ops->dequeue_burst)(port, ev, nb_events,
-					       timeout_ticks);
+
+	return (fp_ops->dequeue_burst)(port, ev, nb_events, timeout_ticks);
 }
 
 #define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 2706d5e6c8..1818483044 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -12,18 +12,11 @@
 extern "C" {
 #endif
 
-typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
-/**< @internal Enqueue event on port of a device */
-
 typedef uint16_t (*event_enqueue_burst_t)(void *port,
 					  const struct rte_event ev[],
 					  uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */
 
-typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
-				    uint64_t timeout_ticks);
-/**< @internal Dequeue event from port of a device */
-
 typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
 					  uint16_t nb_events,
 					  uint64_t timeout_ticks);
@@ -60,16 +53,12 @@ typedef void (*event_preschedule_t)(void *port,
 struct __rte_cache_aligned rte_event_fp_ops {
 	void **data;
 	/**< points to array of internal port data pointers */
-	event_enqueue_t enqueue;
-	/**< PMD enqueue function. */
 	event_enqueue_burst_t enqueue_burst;
 	/**< PMD enqueue burst function. */
 	event_enqueue_burst_t enqueue_new_burst;
 	/**< PMD enqueue burst new function. */
 	event_enqueue_burst_t enqueue_forward_burst;
 	/**< PMD enqueue burst fwd function. */
-	event_dequeue_t dequeue;
-	/**< PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< PMD dequeue burst function. */
 	event_maintain_t maintain;
-- 
2.43.0


^ permalink raw reply	[relevance 11%]

* Re: [EXTERNAL] Re: [RFC PATCH 0/3] add feature arc in rte_graph
  2024-10-16  9:38  0%       ` Robin Jarry
@ 2024-10-16 13:50  0%         ` Nitin Saxena
  2024-10-17  7:03  0%           ` Nitin Saxena
  0 siblings, 1 reply; 169+ results
From: Nitin Saxena @ 2024-10-16 13:50 UTC (permalink / raw)
  To: Robin Jarry
  Cc: David Marchand, Nitin Saxena, Jerin Jacob,
	Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram, Zhirun Yan,
	dev, Christophe Fontaine

Hi Robin,

Thanks for the review
Please see my replies inline

Thanks,
Nitin

On Wed, Oct 16, 2024 at 3:08 PM Robin Jarry <rjarry@redhat.com> wrote:
>
> Hi folks,
>
> David Marchand, Oct 16, 2024 at 11:24:
> > On Mon, Oct 14, 2024 at 1:12 PM Nitin Saxena <nsaxena@marvell.com> wrote:
> >> I had pushed non RFC patch series before -rc1 date (11th oct).
> >> We have an ABI change in this patch series https://patches.dpdk.org/project/dpdk/patch/20241010133111.2764712-3-nsaxena@marvell.com/
> >> Could you help merge this patch series in rc2 otherwise it has to wait for next LTS
> >
> > Just read through the series, I am not confident with this addition.
> > It requires a lot of changes in the node code for supporting it, where
> > it should be something handled in/facilitated by the graph library
> > itself.
>
> As far as I can tell, it will be very complicated (if not impossible) to
> determine in a generic manner whether a packet must be steered towards
> a sub tree or not. The decision *must* come from the originating node in
> some way or another.

Nitin> I am not sure it *must* always come from the originating node.
What about a control plane which wants to enable the "IP4 feature" on
interface 'X' by assigning an IP address?
An originating node (say: ip4-input) *must not* activate the IP4 lookup
sub-graph for interface 'X' until the control plane assigns an IP
address to it.
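
As a rough sketch of that control-plane flow (the function and arc
names below are placeholders, not the final API proposed in this
series):

	/* Hypothetical hook, called when an IPv4 address is assigned
	 * to an interface. Until it runs, packets from this interface
	 * keep taking the arc's default path out of "ipv4-input".
	 */
	static int
	on_ip4_addr_add(uint16_t if_index)
	{
		/* ip4_arc: assumed pre-created feature arc handle */
		return graph_feature_enable(ip4_arc, if_index,
				"ipv4-lookup");
	}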

Regarding the complexity of adopting feature arc changes in the fast path:
- A sub-optimal change for feature arc would be simple and trivial, but
at the cost of performance.
- Complexity increases when feature arc changes are optimally
integrated (like the "ip4_rewrite" changes in the patch) with no
performance regression.

>
> > I did not read much from Robin or Christophe who have been writing
> > more node code than me.
> > I would prefer their opinion before going forward.
>
> This series is indeed very dense. I like the concept of having
> extensible sub trees in the graph but it feels like the implementation
> is more complex than it should be.
>
> Lacking another solution, we went for a naive approach in grout.
> Basically, some nodes have undefined next nodes which are extended using
> a dedicated API.

Nitin> At first glance, it looks like "grout" is trying to
solve a use-case where a child is added to the parent's
undefined next node. This creates a runtime parent-child
relationship.

On the other hand, feature arc creates not just parent-child
relationships but also sibling-sibling relationships. Also,
enabling sub-graphs per interface is critical functionality in feature
arc, and that is what adds the complexity.
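
For reference, the grout pattern being discussed boils down to dynamic
edge registration: a node leaves some next nodes undefined and other
nodes attach themselves at init time. A simplified, paraphrased sketch
using the existing rte_graph edge API (not grout's actual code):

	/* Attach "arp_input" as an additional next node of
	 * "eth_input" at init time; the fast path then picks the
	 * right edge based on a classifier field.
	 */
	const char *next = "arp_input";
	rte_node_edge_update(rte_node_from_name("eth_input"),
			RTE_EDGE_ID_INVALID, &next, 1);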

Let's assume a use-case in the ingress direction, at the IPv4 layer,
where IPv4-input is the *originating node* and

- On interface X, the IPsec-policy, IP4-classify and IPv4-lookup
sub-graphs are enabled, in that sequential order.
- On interface Y, the IP4-classify and IPv4-lookup sub-graphs are
enabled, in that sequential order, i.e. IPsec-policy is *disabled* on
interface Y.

In the fast path, the following processing should happen for "mbuf0",
received on interface X:
- "ipv4-input" sends mbuf0 to the first enabled sub-graph node for
interface X, "IPsec-policy".
- In "IPsec-policy" node processing, if the policy lookup results in a
"bypass" action for mbuf0, it must then be sent to the next enabled
sub-graph, i.e. "IPv4-classify" (from the "IPsec-policy" node).
- In "IPv4-classify" node processing, if classification fails for
mbuf0, it should finally be sent to the "IPv4-lookup" node (from the
"IPv4-classify" node).

Whereas for "mbuf1", received on interface Y, the following fast path
processing must happen:
- "ipv4-input" sends mbuf1 to the first enabled sub-graph node for
interface Y, "IPv4-classify".
- If "IPv4-classify" fails for mbuf1, it should finally be sent
to the "IPv4-lookup" node.

To behave differently for interface X and interface Y as above:
- First of all, IPsec-policy/IPv4-classify/IPv4-lookup must be
connected to the "ipv4-input" node (parent-child relationship).
- Also, IPsec-policy/IPv4-classify/IPv4-lookup must be connected
with each other (sibling-sibling relationship).
- Fast path APIs provide *rte_edge_t* values to send an mbuf from one
node to another (see the sketch after this list)
   1. Based on the interface (either interface X or interface Y).
   2. Based on which node the fast path APIs are called from. The next
enabled feature/sub-graph can only be determined from the previous
enabled feature/sub-graph in the fast path.
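
A minimal sketch of what that walk could look like from the
originating node (placeholder names again, except
rte_node_enqueue_x1() which is the existing rte_graph fast path API):

	/* Inside a hypothetical "ipv4-input" process callback, with
	 * "graph", "node" and "mbuf" provided by the surrounding
	 * loop. arc_first_feature() is a placeholder returning the
	 * edge of the first feature enabled on the mbuf's interface,
	 * e.g. "IPsec-policy" on interface X but "IPv4-classify" on
	 * interface Y.
	 */
	uint16_t if_index = mbuf->port;
	rte_edge_t edge;

	if (arc_first_feature(ip4_arc, if_index, mbuf, &edge) == 0)
		rte_node_enqueue_x1(graph, node, edge, mbuf);
	else
		/* no feature enabled on this interface: use the
		 * default "ipv4-lookup" edge (placeholder constant) */
		rte_node_enqueue_x1(graph, node, NEXT_IP4_LOOKUP, mbuf);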

Not sure if grout handles the above use-cases in the same manner. AFAIR,
for any control plane change grout re-creates the "graph" objects, which
may not be required with feature arc.

>
> https://github.com/DPDK/grout/blob/v0.2/modules/infra/datapath/eth_input.c#L23-L31
>
> This API can be used by other nodes to attach themselves to these
> extensible nodes:
>
> https://github.com/DPDK/grout/blob/v0.2/modules/ip/datapath/arp_input.c#L143
> https://github.com/DPDK/grout/blob/v0.2/modules/ip/datapath/ip_input.c#L124
> https://github.com/DPDK/grout/blob/v0.2/modules/ip6/datapath/ip6_input.c#L122
>
> After which, the extensible nodes can steer the packets towards the
> correct downstream edge based on the dedicated classifier field:
>
> https://github.com/DPDK/grout/blob/v0.2/modules/infra/datapath/eth_input.c#L79
>
> Obviously, this does not natively support a per-interface sub tree
> traversal, but it can be done in the originating node based on packet
> private context data.

Nitin> Expressing per-interface sub-tree traversal is the key
functionality of feature arc.

>
> This raises a more important question: how can we standardize the way
> private application data is passed from node to node? And how could we
> enforce this declaratively in the node register API?

Nitin> What you are suggesting here (node-to-node application data
exchange) can be done in the rte_node_register API but, IMO, this is
not related to feature arc.
Feature arc is not just between parent and child nodes but also
between siblings (as explained above).

>
> Do you think we could find some middle ground that would not require
> such extensive changes?

Nitin> If you are pointing to the ipv4-rewrite changes, I had an
internal review comment to add those changes without any *performance
regression*.
Sub-optimal code would be much simpler, but at the cost of performance.

>
> Cheers,
> Robin
>

^ permalink raw reply	[relevance 0%]

* Re: [EXTERNAL] Re: [RFC PATCH 0/3] add feature arc in rte_graph
  2024-10-16 13:50  0%         ` Nitin Saxena
@ 2024-10-17  7:03  0%           ` Nitin Saxena
  0 siblings, 0 replies; 169+ results
From: Nitin Saxena @ 2024-10-17  7:03 UTC (permalink / raw)
  To: Robin Jarry
  Cc: David Marchand, Nitin Saxena, Jerin Jacob,
	Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram, Zhirun Yan,
	dev, Christophe Fontaine

Hi Robin/David and all,

We realized the feature arc patch series is difficult to understand as
a new concept. Our objectives with the feature arc changes are the
following:
1. Allow reusability of standard DPDK nodes (defined in lib/nodes/*)
with out-of-tree applications (like grout). Currently, out-of-tree
graph applications duplicate standard nodes instead of reusing the
ones which are available. In the long term, we would like to mature
standard DPDK nodes with the flexibility of hooking them to
out-of-tree application nodes.

2. Flexibility to enable/disable sub-graphs per interface based on
runtime configuration updates. Protocol sub-graphs can be selectively
enabled for a few (or all) interfaces at runtime (see the sketch after
this list)

3. More than one sub-graph/feature can be enabled on an interface, so
a packet has to follow a sequentially ordered node path on worker
cores. Packets may need to move from one sub-graph to another
sub-graph per interface

4. Last but not least, an optimized implementation which does not
stop (or minimally stops) worker cores for any control plane runtime
updates. Any performance regression should also be avoided

I am planning to create a draft presentation on feature arc which I
can share, when ready, for discussion. If needed, I can also present
it in one of the DPDK community meetings.
There we can also discuss whether there are any alternatives for
achieving the above objectives.

Thanks,
Nitin
On Wed, Oct 16, 2024 at 7:20 PM Nitin Saxena <nsaxena16@gmail.com> wrote:
>
> Hi Robin,
>
> Thanks for the review
> Please see my replies inline
>
> Thanks,
> Nitin
>
> On Wed, Oct 16, 2024 at 3:08 PM Robin Jarry <rjarry@redhat.com> wrote:
> >
> > Hi folks,
> >
> > David Marchand, Oct 16, 2024 at 11:24:
> > > On Mon, Oct 14, 2024 at 1:12 PM Nitin Saxena <nsaxena@marvell.com> wrote:
> > >> I had pushed the non-RFC patch series before the -rc1 date (11th Oct).
> > >> We have an ABI change in this patch series https://patches.dpdk.org/project/dpdk/patch/20241010133111.2764712-3-nsaxena@marvell.com/
> > >> Could you help merge this patch series in rc2? Otherwise it has to wait for the next LTS
> > >
> > > Just read through the series, I am not confident with this addition.
> > > It requires a lot of changes in the node code for supporting it, where
> > > it should be something handled in/facilitated by the graph library
> > > itself.
> >
> > As far as I can tell, it will be very complicated (if not impossible) to
> > determine in a generic manner whether a packet must be steered towards
> > a sub tree or not. The decision *must* come from the originating node in
> > some way or another.
>
> Nitin> I am not sure it *must* always come from the originating node.
> What about a control plane which wants to enable the "IP4 feature" on
> interface 'X' by assigning an IP address?
> An originating node (say ip4-input) *must not* activate the IP4 lookup
> sub-graph for interface 'X' until the control plane assigns an IP
> address to it.
>
> Regarding the complexity of adopting feature arc changes in the fast path:
> - a sub-optimal change for feature arc would be simple and trivial,
> but at the cost of performance.
> - Complexity increases when feature arc changes are optimally
> integrated (like the "ip4_rewrite" changes in the patch) with no
> performance regression
>
> >
> > > I did not read much from Robin or Christophe who have been writing
> > > more node code than me.
> > > I would prefer their opinion before going forward.
> >
> > This series is indeed very dense. I like the concept of having
> > extensible sub trees in the graph but it feels like the implementation
> > is more complex than it should be.
> >
> > Lacking another solution, we went for a naive approach in grout.
> > Basically, some nodes have undefined next nodes, which are extended
> > using a dedicated API.
>
> Nitin> At first glance, it looks like "grout" is trying to solve a
> use-case where a child is added to the parent's undefined next node.
> This creates a runtime parent-child relationship.
>
> On the other hand, feature arc creates not just parent-child
> relationships but also sibling-sibling relationships. Also, enabling
> sub-graphs per interface is critical feature arc functionality, which
> adds complexity.
>
> Let's assume a use-case in the ingress direction, at the IPv4 layer,
> where "ipv4-input" is the *originating node*, and
>
> - On interface X, the IPsec-policy, IPv4-classify and IPv4-lookup
> sub-graphs are enabled in sequential order
> - On interface Y, the IPv4-classify and IPv4-lookup sub-graphs are
> enabled in sequential order, i.e. IPsec-policy is *disabled* on
> interface Y
>
> In the fast path, the following processing should happen for "mbuf0",
> which is received on interface X:
> - "ipv4-input" sends mbuf0 to the first enabled sub-graph node for
> interface X, "IPsec-policy"
> - In "IPsec-policy" node processing, if the policy action results in a
> "bypass" action for mbuf0, it must then be sent to the next enabled
> sub-graph, i.e. "IPv4-classify" (from the "IPsec-policy" node)
> - In "IPv4-classify" node processing, if classification fails for
> mbuf0, it should finally be sent to the "IPv4-lookup" node (from the
> "IPv4-classify" node)
>
> whereas for "mbuf1", received on interface Y, the following fast path
> processing must happen:
> - "ipv4-input" sends mbuf1 to the first enabled sub-graph node for
> interface Y, "IPv4-classify"
> - If "IPv4-classify" fails for mbuf1, it should finally be sent to the
> "IPv4-lookup" node
>
> To behave differently for interface X and interface Y as above:
> - First of all, IPsec-policy/IPv4-classify/IPv4-lookup must be
> connected to the "ipv4-input" node (parent-child relationship)
> - Also, IPsec-policy/IPv4-classify/IPv4-lookup must be connected
> with each other (sibling-sibling relationship)
> - Fast path APIs provide an *rte_edge_t* to send an mbuf from one node
> to another
>    1. Based on the interface (either interface X or interface Y)
>    2. Based on the node from which the fast path APIs are called. The
> next enabled feature/sub-graph can only be determined from the
> previous enabled feature/sub-graph in the fast path
>
> Not sure if grout handles the above use-cases in the same manner.
> AFAIR, for any control plane change, grout re-creates "graph" objects,
> which may not be required with feature arc.
>
> >
> > https://github.com/DPDK/grout/blob/v0.2/modules/infra/datapath/eth_input.c#L23-L31
> >
> > This API can be used by other nodes to attach themselves to these
> > extensible nodes:
> >
> > https://github.com/DPDK/grout/blob/v0.2/modules/ip/datapath/arp_input.c#L143
> > https://github.com/DPDK/grout/blob/v0.2/modules/ip/datapath/ip_input.c#L124
> > https://github.com/DPDK/grout/blob/v0.2/modules/ip6/datapath/ip6_input.c#L122
> >
> > After which, the extensible nodes can steer the packets towards the
> > correct downstream edge based on the dedicated classifier field:
> >
> > https://github.com/DPDK/grout/blob/v0.2/modules/infra/datapath/eth_input.c#L79
> >
> > Obviously, this does not natively support a per-interface sub tree
> > traversal, but it can be done in the originating node based on packet
> > private context data.
>
> Nitin> Expressing per-interface sub-tree traversal is the key
> functionality of feature arc.
>
> >
> > This raises a more important question: how can we standardize the way
> > private application data is passed from node to node? And how could we
> > enforce this declaratively in the node register API?
>
> Nitin> What you are suggesting here (node-to-node application data
> exchange) can be done in the rte_node_register API but, IMO, this is
> not related to feature arc.
> Feature arc is not just between parent and child nodes but also
> between siblings (as explained above).
>
> >
> > Do you think we could find some middle ground that would not require
> > such extensive changes?
>
> Nitin> If you are pointing to the ipv4-rewrite changes, I had an
> internal review comment to add those changes without any *performance
> regression*.
> Sub-optimal code would be much simpler, but at the cost of performance.
>
> >
> > Cheers,
> > Robin
> >

^ permalink raw reply	[relevance 0%]

* Re: [PATCH v5 0/5] power: refactor power management library
  @ 2024-10-17 16:17  3%   ` Stephen Hemminger
  0 siblings, 0 replies; 169+ results
From: Stephen Hemminger @ 2024-10-17 16:17 UTC (permalink / raw)
  To: Sivaprasad Tummala
  Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
	cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, dev

On Thu, 17 Oct 2024 10:26:44 +0000
Sivaprasad Tummala <sivaprasad.tummala@amd.com> wrote:

> This patchset refactors the power management library, addressing both
> core and uncore power management. The primary changes involve the
> creation of dedicated directories for each driver within
> 'drivers/power/core/*' and 'drivers/power/uncore/*'.
> 
> This refactor significantly improves code organization, enhances
> clarity, and boosts maintainability. It lays the foundation for more
> focused development on individual drivers and facilitates seamless
> integration of future enhancements, particularly the AMD uncore driver.
> 
> Furthermore, this effort aims to streamline code maintenance by
> consolidating common functions for cpufreq and cppc across various
> core drivers, thus reducing code duplication.


Does not build.

*Build Failed #2:
OS: RHEL94-64
Target: x86_64-native-linuxapp-gcc
FAILED: examples/dpdk-distributor.p/distributor_main.c.o 
gcc -Iexamples/dpdk-distributor.p -Iexamples -I../examples -Iexamples/distributor -I../examples/distributor -I../examples/common -I. -I.. -Iconfig -I../config -Ilib/eal/include -I../lib/eal/include -Ilib/eal/linux/include -I../lib/eal/linux/include -Ilib/eal/x86/include -I../lib/eal/x86/include -Ilib/eal/common -I../lib/eal/common -Ilib/eal -I../lib/eal -Ilib/kvargs -I../lib/kvargs -Ilib/log -I../lib/log -Ilib/metrics -I../lib/metrics -Ilib/telemetry -I../lib/telemetry -Ilib/mempool -I../lib/mempool -Ilib/ring -I../lib/ring -Ilib/net -I../lib/net -Ilib/mbuf -I../lib/mbuf -Ilib/ethdev -I../lib/ethdev -Ilib/meter -I../lib/meter -Ilib/cmdline -I../lib/cmdline -Ilib/distributor -I../lib/distributor -Ilib/power -I../lib/power -Ilib/timer -I../lib/timer -fdiagnostics-color=always -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch -Wextra -Werror -std=c11 -O3 -include rte_config.h -Wcast-qual -Wdeprecated -Wformat -Wformat-nonliteral -Wformat-security -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wold-style-definition -Wpointer-arith -Wsign-compare -Wstrict-prototypes -Wundef -Wwrite-strings -Wno-address-of-packed-member -Wno-packed-not-aligned -Wno-missing-field-initializers -Wno-zero-length-bounds -D_GNU_SOURCE -march=native -mrtm -Wno-format-truncation -DALLOW_EXPERIMENTAL_API -MD -MQ examples/dpdk-distributor.p/distributor_main.c.o -MF examples/dpdk-distributor.p/distributor_main.c.o.d -o examples/dpdk-distributor.p/distributor_main.c.o -c ../examples/distributor/main.c
In file included from ../examples/distributor/main.c:20:
In function ‘rte_power_get_capabilities’,
    inlined from ‘main’ at ../examples/distributor/main.c:888:4:
../lib/power/rte_power.h:285:42: error: call to ‘rte_power_get_core_ops’ declared with attribute error: Symbol is not public ABI
  285 |         struct rte_power_core_ops *ops = rte_power_get_core_ops();
      |                                          ^~~~~~~~~~~~~~~~~~~~~~~~
[2962/3118] Compiling C object examples/dpdk-fips_validation.p/fips_validation_fips_validation_hmac.c.o
[2963/3118] Compiling C object examples/dpdk-bbdev_app.p/bbdev_app_main.c.o
[2964/3118] Compiling C object examples/dpdk-fips_validation.p/fips_validation_fips_validation_xts.c.o
[2965/3118] Compiling C object examples/dpdk-fips_validation.p/fips_validation_fips_validation_sha.c.o
[2966/3118] Linking target examples/dpdk-bond
[2967/3118] Compiling C object examples/dpdk-fips_validation.p/fips_validation_main.c.o
[2968/3118] Compiling C object app/dpdk-test.p/test_test_ring_perf.c.o
[2969/3118] Compiling C object app/dpdk-test.p/test_test_trace_perf.c.o
[2970/3118] Compiling C object app/dpdk-test.p/test_test_ring.c.o
ninja: build stopped

^ permalink raw reply	[relevance 3%]

* Updated Invitation: Adding support for PCIe steering tags in DPDK
@ 2024-10-17 15:22  2% Data Plane Development Kit - Meetings
  0 siblings, 0 replies; 169+ results
From: Data Plane Development Kit - Meetings @ 2024-10-17 15:22 UTC (permalink / raw)
  To: dev

[-- Attachment #1: Type: text/html, Size: 4155 bytes --]

[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: invite.ics --]
[-- Type: text/calendar; method=REQUEST, Size: 5499 bytes --]

BEGIN:VCALENDAR
METHOD:REQUEST
PRODID:-//Linux Foundation//Meeting Management
VERSION:2.0
BEGIN:VTIMEZONE
TZID:America/Chicago
LAST-MODIFIED:20221029T021029Z
TZURL:http://tzurl.org/zoneinfo/America/Chicago
X-LIC-LOCATION:America/Chicago
X-PROLEPTIC-TZNAME:LMT
BEGIN:STANDARD
TZNAME:CST
TZOFFSETFROM:-055036
TZOFFSETTO:-0600
DTSTART:18831118T120924
END:STANDARD
BEGIN:DAYLIGHT
TZNAME:CDT
TZOFFSETFROM:-0600
TZOFFSETTO:-0500
DTSTART:19180331T020000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=-1SU;UNTIL=19190330T080000Z
END:DAYLIGHT
BEGIN:STANDARD
TZNAME:CST
TZOFFSETFROM:-0500
TZOFFSETTO:-0600
DTSTART:19181027T020000
RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU;UNTIL=19211030T070000Z
END:STANDARD
BEGIN:DAYLIGHT
TZNAME:CDT
TZOFFSETFROM:-0600
TZOFFSETTO:-0500
DTSTART:19200613T020000
RDATE:19210327T020000
RDATE:19740106T020000
RDATE:19750223T020000
END:DAYLIGHT
BEGIN:DAYLIGHT
TZNAME:CDT
TZOFFSETFROM:-0600
TZOFFSETTO:-0500
DTSTART:19220430T020000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=-1SU;UNTIL=19350428T080000Z
END:DAYLIGHT
BEGIN:STANDARD
TZNAME:CST
TZOFFSETFROM:-0500
TZOFFSETTO:-0600
DTSTART:19220924T020000
RRULE:FREQ=YEARLY;BYMONTH=9;BYDAY=-1SU;UNTIL=19350929T070000Z
END:STANDARD
BEGIN:STANDARD
TZNAME:EST
TZOFFSETFROM:-0600
TZOFFSETTO:-0500
DTSTART:19360301T020000
END:STANDARD
BEGIN:STANDARD
TZNAME:CST
TZOFFSETFROM:-0500
TZOFFSETTO:-0600
DTSTART:19361115T020000
RDATE:19450930T020000
END:STANDARD
BEGIN:DAYLIGHT
TZNAME:CDT
TZOFFSETFROM:-0600
TZOFFSETTO:-0500
DTSTART:19370425T020000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=-1SU;UNTIL=19410427T080000Z
END:DAYLIGHT
BEGIN:STANDARD
TZNAME:CST
TZOFFSETFROM:-0500
TZOFFSETTO:-0600
DTSTART:19370926T020000
RRULE:FREQ=YEARLY;BYMONTH=9;BYDAY=-1SU;UNTIL=19410928T070000Z
END:STANDARD
BEGIN:DAYLIGHT
TZNAME:CWT
TZOFFSETFROM:-0600
TZOFFSETTO:-0500
DTSTART:19420209T020000
END:DAYLIGHT
BEGIN:DAYLIGHT
TZNAME:CPT
TZOFFSETFROM:-0500
TZOFFSETTO:-0500
DTSTART:19450814T180000
END:DAYLIGHT
BEGIN:DAYLIGHT
TZNAME:CDT
TZOFFSETFROM:-0600
TZOFFSETTO:-0500
DTSTART:19460428T020000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=-1SU;UNTIL=19730429T080000Z
END:DAYLIGHT
BEGIN:STANDARD
TZNAME:CST
TZOFFSETFROM:-0500
TZOFFSETTO:-0600
DTSTART:19460929T020000
RRULE:FREQ=YEARLY;BYMONTH=9;BYDAY=-1SU;UNTIL=19540926T070000Z
END:STANDARD
BEGIN:STANDARD
TZNAME:CST
TZOFFSETFROM:-0500
TZOFFSETTO:-0600
DTSTART:19551030T020000
RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU;UNTIL=20061029T070000Z
END:STANDARD
BEGIN:DAYLIGHT
TZNAME:CDT
TZOFFSETFROM:-0600
TZOFFSETTO:-0500
DTSTART:19760425T020000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=-1SU;UNTIL=19860427T080000Z
END:DAYLIGHT
BEGIN:DAYLIGHT
TZNAME:CDT
TZOFFSETFROM:-0600
TZOFFSETTO:-0500
DTSTART:19870405T020000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU;UNTIL=20060402T080000Z
END:DAYLIGHT
BEGIN:DAYLIGHT
TZNAME:CDT
TZOFFSETFROM:-0600
TZOFFSETTO:-0500
DTSTART:20070311T020000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
END:DAYLIGHT
BEGIN:STANDARD
TZNAME:CST
TZOFFSETFROM:-0500
TZOFFSETTO:-0600
DTSTART:20071104T020000
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
ATTENDEE;VALUE=TEXT:dev@dpdk.org
CREATED;TZID=America/Chicago:20241017T102248
DESCRIPTION:\nYou have been invited to a meeting for Data Plane Development Kit (DPDK)\n\nWe discussed adding the PCIe steering tag support to DPDK. This feature allows for stashing the descriptors and packet data closer to the CPUs\, possibly allowing for lower latency and higher throughput. This feature requires contributions from CPU vendors and NIC vendors. The goal of the meeting is to present the next version of the API and seek support for implementation from other participants in the community. Agenda:\n- Brief introduction to the feature\n- Introduce the APIs from RFC v2 (this will be submitted to the community before the call)\n- Dependencies on kernel support - API for reading steering tags\n- Addressing ABI in advance as patches will not be ready by 24.11\n\nWays to join meeting:\n\n1. Join from PC\, Mac\, iPad\, or Android\n\nhttps://zoom-lfx.platform.linuxfoundation.org/meeting/94917063595?password=77f36625-ad41-4b9c-b067-d33e68c3a29e\n\n2. Join via audio\n\nOne tap mobile:\nUS (iOS): +12532158782\,\,94917063595#\,\,\,\,*270522# or +13462487799\,\,94917063595#\,\,\,\,*270522#\nUS (Android): +12532158782\;94917063595#\;270522# or +13462487799\;94917063595#\;270522#\n\nOr dial:\nUS: +1 253 215 8782 or +1 346 248 7799 or +1 669 900 6833 or +1 301 715 8592 or +1 312 626 6799 or +1 646 374 8656 or 877 369 0926 (Toll Free) or 855 880 1246 (Toll Free)\nCanada: +1 647 374 4685 or +1 647 558 0588 or +1 778 907 2071 or +1 204 272 7920 or +1 438 809 7799 or +1 587 328 1099 or 855 703 8985 (Toll Free)\n\nMeeting ID: 94917063595\n\nMeeting Passcode: 270522\n\n\nInternational numbers: https://zoom.us/u/alwnPIaVT\n
DTEND;TZID=America/Chicago:20241023T100000
DTSTAMP;TZID=America/Chicago:20241017T102248
DTSTART;TZID=America/Chicago:20241023T090000
LAST-MODIFIED;TZID=America/Chicago:20241017T102248
LOCATION:https://zoom-lfx.platform.linuxfoundation.org/meeting/94917063595?password=77f36625-ad41-4b9c-b067-d33e68c3a29e
ORGANIZER;CN=Data Plane Development Kit (DPDK):MAILTO:meetings@lfx.dev
SUMMARY:Adding support for PCIe steering tags in DPDK
TZID:America/Chicago
TZNAME:America/Chicago
UID:94917063595
URL;VALUE=TEXT:https://zoom-lfx.platform.linuxfoundation.org/meeting/94917063595?password=77f36625-ad41-4b9c-b067-d33e68c3a29e
X-MEETING-ID:94917063595
X-OCCURRENCE-ID:
X-REGISTRANT-ID:08cdd9b7-dfa9-42ea-a636-f8eaedece700
END:VEVENT
END:VCALENDAR

^ permalink raw reply	[relevance 2%]

* DPDK - PCIe Steering Tags Meeting on 10/23/24
@ 2024-10-17 19:56  3% Wathsala Wathawana Vithanage
  2024-10-21  2:05  0% ` Wathsala Wathawana Vithanage
  0 siblings, 1 reply; 169+ results
From: Wathsala Wathawana Vithanage @ 2024-10-17 19:56 UTC (permalink / raw)
  To: dev, Nathan Southern, thomas, Honnappa Nagarahalli; +Cc: Dhruv Tripathi, nd

Hi all,
 
This is an invitation to discuss adding PCIe steering tags support to DPDK.
We have had brief conversations over the idea at the DPDK summit.
Steering tags allow stashing of descriptors and packet data closer to the CPUs, possibly allowing for lower latency and higher throughput.
This feature requires contributions from CPU vendors and NIC vendors. 
The goal of the meeting is to present the next version of the API and seek support for implementation from other participants in the community. 

I will be sending out the RFC some time this week, so there will be plenty of time before the meeting to go over it.

Agenda:
- Brief introduction to the feature
- Introduce the APIs from RFC v2 (this will be submitted to the community before the call)
- Dependencies on kernel support - API for reading steering tags
- Addressing ABI in advance as patches will not be ready by 24.11

Please join the call if you are interested in the topic.
LXF meeting registration link: https://zoom-lfx.platform.linuxfoundation.org/meeting/94917063595?password=77f36625-ad41-4b9c-b067-d33e68c3a29e&invite=true

Thanks.

--wathsala


^ permalink raw reply	[relevance 3%]

* Community Call for Adding Support of PCIe Steering Tags Support in DPDK
@ 2024-10-17 21:32  3% Wathsala Wathawana Vithanage
  0 siblings, 0 replies; 169+ results
From: Wathsala Wathawana Vithanage @ 2024-10-17 21:32 UTC (permalink / raw)
  To: announce

A DPDK community call on adding support for PCIe steering tags is scheduled for 10/23/24 at 9AM CST.
Steering tags allow for the stashing of descriptors and packet data closer to the CPUs, possibly allowing for lower latency and higher throughput.
This feature requires contributions from CPU vendors and NIC vendors.
The meeting's goal is to present the next version of the API and seek support for its implementation from other community participants.

Agenda:
- Brief introduction to the feature
- Introduce the APIs from RFC v2 (this will be submitted to the community before the call)
- Dependencies on kernel support - API for reading steering tags
- Addressing ABI in advance as patches will not be ready by 24.11

LXF meeting registration link: https://zoom-lfx.platform.linuxfoundation.org/meeting/94917063595?password=77f36625-ad41-4b9c-b067-d33e68c3a29e&invite=true

^ permalink raw reply	[relevance 3%]

* Re: [PATCH RESEND v7 0/5] app/testpmd: support multiple process attach and detach port
  @ 2024-10-18  1:04  0%     ` Ferruh Yigit
  2024-10-18  2:48  0%       ` lihuisong (C)
  0 siblings, 1 reply; 169+ results
From: Ferruh Yigit @ 2024-10-18  1:04 UTC (permalink / raw)
  To: lihuisong (C), thomas, andrew.rybchenko; +Cc: dev, fengchengwen, liuyonglong

On 10/8/2024 3:32 AM, lihuisong (C) wrote:
> Hi Thomas and Ferruh,
> 
> We've discussed it on and off a few times, and we've reached some
> consensus.
This has been going on for more than 2 years 😅
> Can you have a look at this series again?
> If we really don't need it, I will drop it from my upstreaming list.
> 

Hi Huisong,

I was not really convinced by the patch series, but did not want to
block it outright; sorry that this caused the patch series to stay around.

Having checked again, it still feels like adding unnecessary
complexity, and I am for rejecting this series.

The overall target is to be able to support hotplug with
primary/secondary processes, and it uses event handlers for this, but
that requires adding a new ethdev state to be able to iterate over
devices etc...
Perhaps there is a better way to support this without relying on event
handlers.


> /Huisong
> 
> 
On 2024/9/29 13:52, Huisong Li wrote:
>> This patchset fix some bugs and support attaching and detaching port
>> in primary and secondary.
>>
>> ---
>>   -v7: fix conflicts
>>   -v6: adjust rte_eth_dev_is_used position based on alphabetical order
>>        in version.map
>>   -v5: move 'ALLOCATED' state to the back of 'REMOVED' to avoid abi
>> break.
>>   -v4: fix a misspelling.
>>   -v3:
>>     #1 merge patch 1/6 and patch 2/6 into patch 1/5, and add modification
>>        for other bus type.
>>     #2 add a RTE_ETH_DEV_ALLOCATED state in rte_eth_dev_state to resolve
>>        the problem in patch 2/5.
>>   -v2: resend due to CI unexplained failure.
>>
>> Huisong Li (5):
>>    drivers/bus: restore driver assignment at front of probing
>>    ethdev: fix skip valid port in probing callback
>>    app/testpmd: check the validity of the port
>>    app/testpmd: add attach and detach port for multiple process
>>    app/testpmd: stop forwarding in new or destroy event
>>
>>   app/test-pmd/testpmd.c                   | 47 +++++++++++++++---------
>>   app/test-pmd/testpmd.h                   |  1 -
>>   drivers/bus/auxiliary/auxiliary_common.c |  9 ++++-
>>   drivers/bus/dpaa/dpaa_bus.c              |  9 ++++-
>>   drivers/bus/fslmc/fslmc_bus.c            |  8 +++-
>>   drivers/bus/ifpga/ifpga_bus.c            | 12 ++++--
>>   drivers/bus/pci/pci_common.c             |  9 ++++-
>>   drivers/bus/vdev/vdev.c                  | 10 ++++-
>>   drivers/bus/vmbus/vmbus_common.c         |  9 ++++-
>>   drivers/net/bnxt/bnxt_ethdev.c           |  3 +-
>>   drivers/net/bonding/bonding_testpmd.c    |  1 -
>>   drivers/net/mlx5/mlx5.c                  |  2 +-
>>   lib/ethdev/ethdev_driver.c               | 13 +++++--
>>   lib/ethdev/ethdev_driver.h               | 12 ++++++
>>   lib/ethdev/ethdev_pci.h                  |  2 +-
>>   lib/ethdev/rte_class_eth.c               |  2 +-
>>   lib/ethdev/rte_ethdev.c                  |  4 +-
>>   lib/ethdev/rte_ethdev.h                  |  4 +-
>>   lib/ethdev/version.map                   |  1 +
>>   19 files changed, 114 insertions(+), 44 deletions(-)
>>


^ permalink raw reply	[relevance 0%]

* Re: [PATCH RESEND v7 0/5] app/testpmd: support multiple process attach and detach port
  2024-10-18  1:04  0%     ` Ferruh Yigit
@ 2024-10-18  2:48  0%       ` lihuisong (C)
  2024-10-26  4:11  0%         ` lihuisong (C)
  2024-10-29 22:12  0%         ` Ferruh Yigit
  0 siblings, 2 replies; 169+ results
From: lihuisong (C) @ 2024-10-18  2:48 UTC (permalink / raw)
  To: Ferruh Yigit, thomas, andrew.rybchenko; +Cc: dev, fengchengwen, liuyonglong

Hi Ferruh,

Thanks for your considering again. please see reply inline.

On 2024/10/18 9:04, Ferruh Yigit wrote:
> On 10/8/2024 3:32 AM, lihuisong (C) wrote:
>> Hi Thomas and Ferruh,
>>
>> We've discussed it on and off a few times, and we've reached some
>> consensus.
>> They've been going through more than 2 years😅
>> Can you have a look at this series again?
>> If we really don't need it, I will drop it from my upstreaming list.
>>
> Hi Huisong,
>
> I was not really convinced with the patch series, but did not want to
> block it outright, sorry that this caused patch series stay around.
>
> As checked again, still feels like adding unnecessary complexity, and I
> am for rejecting this series.
>
> Overall target is to be able to support hotplug with primary/secondary
> process, and uses event handlers for this but this requires adding a new
> ethdev state to be able iterate over devices etc...
> Perhaps better way to support this without relying on event handlers.
Ignoring the modification of testpmd is ok to me.
But we need to restrict testpmd from supporting attach and detach port
in the multiple process case.
Otherwise, the issues this series solved will be encountered.

BTW, I want to say the patch [2/5] which introduced
RTE_ETH_DEV_ALLOCATED should be considered again,
because it is a real issue in the ethdev layer. This is also the fruit
of what Thomas, you and I discussed before.
Please look at this patch again.

/Huisong
>
>
>> /Huisong
>>
>>
>> 在 2024/9/29 13:52, Huisong Li 写道:
>>> This patchset fix some bugs and support attaching and detaching port
>>> in primary and secondary.
>>>
>>> ---
>>>    -v7: fix conflicts
>>>    -v6: adjust rte_eth_dev_is_used position based on alphabetical order
>>>         in version.map
>>>    -v5: move 'ALLOCATED' state to the back of 'REMOVED' to avoid abi
>>> break.
>>>    -v4: fix a misspelling.
>>>    -v3:
>>>      #1 merge patch 1/6 and patch 2/6 into patch 1/5, and add modification
>>>         for other bus type.
>>>      #2 add a RTE_ETH_DEV_ALLOCATED state in rte_eth_dev_state to resolve
>>>         the problem in patch 2/5.
>>>    -v2: resend due to CI unexplained failure.
>>>
>>> Huisong Li (5):
>>>     drivers/bus: restore driver assignment at front of probing
>>>     ethdev: fix skip valid port in probing callback
>>>     app/testpmd: check the validity of the port
>>>     app/testpmd: add attach and detach port for multiple process
>>>     app/testpmd: stop forwarding in new or destroy event
>>>
>>>    app/test-pmd/testpmd.c                   | 47 +++++++++++++++---------
>>>    app/test-pmd/testpmd.h                   |  1 -
>>>    drivers/bus/auxiliary/auxiliary_common.c |  9 ++++-
>>>    drivers/bus/dpaa/dpaa_bus.c              |  9 ++++-
>>>    drivers/bus/fslmc/fslmc_bus.c            |  8 +++-
>>>    drivers/bus/ifpga/ifpga_bus.c            | 12 ++++--
>>>    drivers/bus/pci/pci_common.c             |  9 ++++-
>>>    drivers/bus/vdev/vdev.c                  | 10 ++++-
>>>    drivers/bus/vmbus/vmbus_common.c         |  9 ++++-
>>>    drivers/net/bnxt/bnxt_ethdev.c           |  3 +-
>>>    drivers/net/bonding/bonding_testpmd.c    |  1 -
>>>    drivers/net/mlx5/mlx5.c                  |  2 +-
>>>    lib/ethdev/ethdev_driver.c               | 13 +++++--
>>>    lib/ethdev/ethdev_driver.h               | 12 ++++++
>>>    lib/ethdev/ethdev_pci.h                  |  2 +-
>>>    lib/ethdev/rte_class_eth.c               |  2 +-
>>>    lib/ethdev/rte_ethdev.c                  |  4 +-
>>>    lib/ethdev/rte_ethdev.h                  |  4 +-
>>>    lib/ethdev/version.map                   |  1 +
>>>    19 files changed, 114 insertions(+), 44 deletions(-)
>>>
> .

^ permalink raw reply	[relevance 0%]

* [PATCH dpdk v4 04/17] net: use IPv6 structure for packet headers
  @ 2024-10-18  9:17  1%   ` Robin Jarry
  2024-10-18  9:17  1%   ` [PATCH dpdk v4 05/17] lpm6: use IPv6 address structure and utils Robin Jarry
  2024-10-18  9:17  2%   ` [PATCH dpdk v4 07/17] rib6: " Robin Jarry
  2 siblings, 0 replies; 169+ results
From: Robin Jarry @ 2024-10-18  9:17 UTC (permalink / raw)
  To: dev, Wisam Jaddo, Cristian Dumitrescu, Konstantin Ananyev,
	Yipeng Wang, Sameh Gobriel, Bruce Richardson, Vladimir Medvedkin,
	Ajit Khaparde, Somnath Kotur, Chas Williams, Min Hu (Connor),
	Potnuri Bharat Teja, Hemant Agrawal, Sachin Saxena, Ziyang Xuan,
	Xiaoyun Wang, Jie Hai, Yisen Zhuang, Jingjing Wu,
	Dariusz Sosnowski, Viacheslav Ovsiienko, Bing Zhao, Ori Kam,
	Suanming Mou, Matan Azrad, Liron Himi, Chaoyong He,
	Devendra Singh Rawat, Alok Prasad, Andrew Rybchenko,
	Stephen Hemminger, Jiawen Wu, Jian Wang, Radu Nicolau,
	Akhil Goyal, Thomas Monjalon, Ferruh Yigit, Nithin Dabilpuram,
	Pavan Nikhilesh

The rte_ipv6_hdr structure uses ad-hoc uint8_t[16] arrays to represent
addresses. Replace these arrays with the newly added rte_ipv6_addr
structure and adapt all code accordingly.
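
For example, an address copy becomes a plain struct assignment instead
of a memcpy (as in the test_reassembly_perf.c hunk below):

	struct rte_ipv6_addr ip6_addr = RTE_IPV6(0x2001, 0x0200, 0, 0, 0, 0, 0, 0);
	ip_hdr->src_addr = ip6_addr;
	ip_hdr->dst_addr = ip6_addr;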

Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
 app/test-flow-perf/items_gen.c           |  4 +--
 app/test-pipeline/pipeline_hash.c        |  4 +--
 app/test/packet_burst_generator.c        |  4 +--
 app/test/test_ipfrag.c                   |  4 +--
 app/test/test_reassembly_perf.c          | 23 +++++++-------
 app/test/test_thash.c                    |  8 ++---
 doc/guides/rel_notes/deprecation.rst     |  2 --
 doc/guides/rel_notes/release_24_11.rst   |  6 ++++
 drivers/net/bnxt/bnxt_flow.c             | 12 ++++----
 drivers/net/bonding/rte_eth_bond_pmd.c   |  6 ++--
 drivers/net/cxgbe/cxgbe_flow.c           | 14 ++++-----
 drivers/net/dpaa2/dpaa2_flow.c           | 22 +++++++-------
 drivers/net/hinic/hinic_pmd_flow.c       |  6 ++--
 drivers/net/hinic/hinic_pmd_tx.c         |  2 +-
 drivers/net/hns3/hns3_flow.c             |  8 ++---
 drivers/net/i40e/i40e_flow.c             | 12 ++++----
 drivers/net/iavf/iavf_fdir.c             |  8 ++---
 drivers/net/iavf/iavf_fsub.c             |  8 ++---
 drivers/net/iavf/iavf_ipsec_crypto.c     |  6 ++--
 drivers/net/ice/ice_fdir_filter.c        | 12 ++++----
 drivers/net/ice/ice_switch_filter.c      | 16 +++++-----
 drivers/net/igc/igc_flow.c               |  4 +--
 drivers/net/ixgbe/ixgbe_flow.c           | 12 ++++----
 drivers/net/ixgbe/ixgbe_ipsec.c          |  4 +--
 drivers/net/mlx5/hws/mlx5dr_definer.c    | 36 +++++++++++-----------
 drivers/net/mlx5/mlx5_flow.c             |  6 ++--
 drivers/net/mlx5/mlx5_flow_dv.c          | 16 ++++------
 drivers/net/mlx5/mlx5_flow_hw.c          | 10 +++----
 drivers/net/mlx5/mlx5_flow_verbs.c       |  8 ++---
 drivers/net/mvpp2/mrvl_flow.c            | 16 ++++------
 drivers/net/nfp/flower/nfp_flower_flow.c | 32 ++++++++++----------
 drivers/net/nfp/nfp_net_flow.c           | 38 +++++++++++-------------
 drivers/net/qede/qede_filter.c           |  4 +--
 drivers/net/sfc/sfc_flow.c               | 22 +++++---------
 drivers/net/tap/tap_flow.c               | 10 +++----
 drivers/net/txgbe/txgbe_flow.c           | 12 ++++----
 drivers/net/txgbe/txgbe_ipsec.c          |  4 +--
 examples/ip_fragmentation/main.c         |  2 +-
 examples/ip_pipeline/pipeline.c          | 16 +++++-----
 examples/ip_reassembly/main.c            |  2 +-
 examples/ipsec-secgw/flow.c              | 33 ++++----------------
 examples/ipsec-secgw/ipsec.c             |  8 ++---
 examples/ipsec-secgw/sa.c                |  4 +--
 examples/ipsec-secgw/sad.h               | 10 ++++---
 examples/l3fwd/l3fwd_fib.c               |  2 +-
 examples/l3fwd/l3fwd_lpm.c               |  4 +--
 lib/ethdev/rte_flow.h                    |  6 ++--
 lib/hash/rte_thash.h                     | 12 ++++----
 lib/ip_frag/rte_ipv6_reassembly.c        |  4 +--
 lib/net/rte_ip6.h                        |  6 ++--
 lib/node/ip6_lookup.c                    | 10 +++----
 lib/pipeline/rte_swx_ipsec.c             |  6 ++--
 lib/pipeline/rte_table_action.c          | 24 +++++++--------
 53 files changed, 260 insertions(+), 310 deletions(-)

diff --git a/app/test-flow-perf/items_gen.c b/app/test-flow-perf/items_gen.c
index 4ae72509d445..c740e1838ffb 100644
--- a/app/test-flow-perf/items_gen.c
+++ b/app/test-flow-perf/items_gen.c
@@ -78,8 +78,8 @@ add_ipv6(struct rte_flow_item *items,
 	for (i = 0; i < 16; i++) {
 		/* Currently src_ip is limited to 32 bit */
 		if (i < 4)
-			ipv6_specs[ti].hdr.src_addr[15 - i] = para.src_ip >> (i * 8);
-		ipv6_masks[ti].hdr.src_addr[15 - i] = 0xff;
+			ipv6_specs[ti].hdr.src_addr.a[15 - i] = para.src_ip >> (i * 8);
+		ipv6_masks[ti].hdr.src_addr.a[15 - i] = 0xff;
 	}
 
 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
diff --git a/app/test-pipeline/pipeline_hash.c b/app/test-pipeline/pipeline_hash.c
index cab9c2098014..194e5c5dcc53 100644
--- a/app/test-pipeline/pipeline_hash.c
+++ b/app/test-pipeline/pipeline_hash.c
@@ -432,7 +432,6 @@ app_main_loop_rx_metadata(void) {
 				struct rte_ipv4_hdr *ip_hdr;
 				struct rte_ipv6_hdr *ipv6_hdr;
 				uint32_t ip_dst;
-				uint8_t *ipv6_dst;
 				uint32_t *signature, *k32;
 
 				m = app.mbuf_rx.array[j];
@@ -452,9 +451,8 @@ app_main_loop_rx_metadata(void) {
 				} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
 					ipv6_hdr = (struct rte_ipv6_hdr *)
 						&m_data[sizeof(struct rte_ether_hdr)];
-					ipv6_dst = ipv6_hdr->dst_addr;
 
-					memcpy(key, ipv6_dst, 16);
+					memcpy(key, &ipv6_hdr->dst_addr, 16);
 				} else
 					continue;
 
diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 867a88da0055..c9ff5257f070 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -148,8 +148,8 @@ initialize_ipv6_header(struct rte_ipv6_hdr *ip_hdr, uint8_t *src_addr,
 	ip_hdr->proto = IPPROTO_UDP;
 	ip_hdr->hop_limits = IP_DEFTTL;
 
-	rte_memcpy(ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
-	rte_memcpy(ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));
+	rte_memcpy(&ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
+	rte_memcpy(&ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));
 
 	return (uint16_t) (pkt_data_len + sizeof(struct rte_ipv6_hdr));
 }
diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
index 8e4df220a214..18d672715729 100644
--- a/app/test/test_ipfrag.c
+++ b/app/test/test_ipfrag.c
@@ -238,8 +238,8 @@ v6_allocate_packet_of(struct rte_mbuf *b, int fill, size_t s, uint8_t ttl,
 	hdr->proto = proto;
 	hdr->hop_limits = ttl;
 
-	memset(hdr->src_addr, 0x08, sizeof(hdr->src_addr));
-	memset(hdr->dst_addr, 0x04, sizeof(hdr->src_addr));
+	memset(&hdr->src_addr, 0x08, sizeof(hdr->src_addr));
+	memset(&hdr->dst_addr, 0x04, sizeof(hdr->src_addr));
 }
 
 static inline void
diff --git a/app/test/test_reassembly_perf.c b/app/test/test_reassembly_perf.c
index 3912179022fc..15db19add917 100644
--- a/app/test/test_reassembly_perf.c
+++ b/app/test/test_reassembly_perf.c
@@ -8,6 +8,7 @@
 #include <rte_ether.h>
 #include <rte_hexdump.h>
 #include <rte_ip.h>
+#include <rte_ip6.h>
 #include <rte_ip_frag.h>
 #include <rte_mbuf.h>
 #include <rte_mbuf_pool_ops.h>
@@ -36,7 +37,7 @@
 #define IP_DST_ADDR(x) ((198U << 24) | (18 << 16) | (1 << 15) | (x))
 
 /* 2001:0200::/48 is IANA reserved range for IPv6 benchmarking (RFC5180) */
-static uint8_t ip6_addr[16] = {32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static struct rte_ipv6_addr ip6_addr = RTE_IPV6(0x2001, 0x0200, 0, 0, 0, 0, 0, 0);
 #define IP6_VERSION 6
 
 #define IP_DEFTTL 64 /* from RFC 1340. */
@@ -340,17 +341,17 @@ ipv6_frag_fill_data(struct rte_mbuf **mbuf, uint8_t nb_frags, uint32_t flow_id,
 			rte_cpu_to_be_16(pkt_len - sizeof(struct rte_ipv6_hdr));
 		ip_hdr->proto = IPPROTO_FRAGMENT;
 		ip_hdr->hop_limits = IP_DEFTTL;
-		memcpy(ip_hdr->src_addr, ip6_addr, sizeof(ip_hdr->src_addr));
-		memcpy(ip_hdr->dst_addr, ip6_addr, sizeof(ip_hdr->dst_addr));
-		ip_hdr->src_addr[7] = (flow_id >> 16) & 0xf;
-		ip_hdr->src_addr[7] |= 0x10;
-		ip_hdr->src_addr[8] = (flow_id >> 8) & 0xff;
-		ip_hdr->src_addr[9] = flow_id & 0xff;
+		ip_hdr->src_addr = ip6_addr;
+		ip_hdr->dst_addr = ip6_addr;
+		ip_hdr->src_addr.a[7] = (flow_id >> 16) & 0xf;
+		ip_hdr->src_addr.a[7] |= 0x10;
+		ip_hdr->src_addr.a[8] = (flow_id >> 8) & 0xff;
+		ip_hdr->src_addr.a[9] = flow_id & 0xff;
 
-		ip_hdr->dst_addr[7] = (flow_id >> 16) & 0xf;
-		ip_hdr->dst_addr[7] |= 0x20;
-		ip_hdr->dst_addr[8] = (flow_id >> 8) & 0xff;
-		ip_hdr->dst_addr[9] = flow_id & 0xff;
+		ip_hdr->dst_addr.a[7] = (flow_id >> 16) & 0xf;
+		ip_hdr->dst_addr.a[7] |= 0x20;
+		ip_hdr->dst_addr.a[8] = (flow_id >> 8) & 0xff;
+		ip_hdr->dst_addr.a[9] = flow_id & 0xff;
 
 		frag_hdr->next_header = IPPROTO_UDP;
 		frag_hdr->reserved = 0;
diff --git a/app/test/test_thash.c b/app/test/test_thash.c
index 65d42fd90085..952da6a52954 100644
--- a/app/test/test_thash.c
+++ b/app/test/test_thash.c
@@ -145,10 +145,10 @@ test_toeplitz_hash_calc(void)
 	}
 	for (i = 0; i < RTE_DIM(v6_tbl); i++) {
 		/*Fill ipv6 hdr*/
-		for (j = 0; j < RTE_DIM(ipv6_hdr.src_addr); j++)
-			ipv6_hdr.src_addr[j] = v6_tbl[i].src_ip[j];
-		for (j = 0; j < RTE_DIM(ipv6_hdr.dst_addr); j++)
-			ipv6_hdr.dst_addr[j] = v6_tbl[i].dst_ip[j];
+		for (j = 0; j < RTE_DIM(ipv6_hdr.src_addr.a); j++)
+			ipv6_hdr.src_addr.a[j] = v6_tbl[i].src_ip[j];
+		for (j = 0; j < RTE_DIM(ipv6_hdr.dst_addr.a); j++)
+			ipv6_hdr.dst_addr.a[j] = v6_tbl[i].dst_ip[j];
 		/*Load and convert ipv6 address into tuple*/
 		rte_thash_load_v6_addrs(&ipv6_hdr, &tuple);
 		tuple.v6.sport = v6_tbl[i].src_port;
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 20fcfedb7b89..830904203c38 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -87,8 +87,6 @@ Deprecation Notices
     - ``rte_lpm6_delete_bulk_func()``
     - ``rte_lpm6_lookup()``
     - ``rte_lpm6_lookup_bulk_func()``
-  net
-    - ``struct rte_ipv6_hdr``
   node
     - ``rte_node_ip6_route_add()``
   pipeline
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index e68676caf029..de24705ef662 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -286,6 +286,12 @@ API Changes
 * drivers/net/ena: Removed ``enable_llq``, ``normal_llq_hdr`` and ``large_llq_hdr`` devargs
   and replaced it with a new shared devarg ``llq_policy`` that keeps the same logic.
 
+* net: A new IPv6 address structure was introduced to replace ad-hoc ``uint8_t[16]`` arrays.
+  The following libraries and symbols were modified:
+
+  net
+    - ``struct rte_ipv6_hdr``
+
 
 ABI Changes
 -----------
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 03413e912149..c41403c753cf 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -424,22 +424,22 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
 					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
 
 			rte_memcpy(filter->src_ipaddr,
-				   ipv6_spec->hdr.src_addr, 16);
+				   &ipv6_spec->hdr.src_addr, 16);
 			rte_memcpy(filter->dst_ipaddr,
-				   ipv6_spec->hdr.dst_addr, 16);
+				   &ipv6_spec->hdr.dst_addr, 16);
 
-			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
+			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr.a,
 						   16)) {
 				rte_memcpy(filter->src_ipaddr_mask,
-					   ipv6_mask->hdr.src_addr, 16);
+					   &ipv6_mask->hdr.src_addr, 16);
 				en |= !use_ntuple ? 0 :
 				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
 			}
 
-			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
+			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr.a,
 						   16)) {
 				rte_memcpy(filter->dst_ipaddr_mask,
-					   ipv6_mask->hdr.dst_addr, 16);
+					   &ipv6_mask->hdr.dst_addr, 16);
 				en |= !use_ntuple ? 0 :
 				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
 			}
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 34131f0e35f6..cda1c37124f4 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -689,10 +689,8 @@ ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
 static inline uint32_t
 ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
 {
-	unaligned_uint32_t *word_src_addr =
-		(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
-	unaligned_uint32_t *word_dst_addr =
-		(unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);
+	unaligned_uint32_t *word_src_addr = (unaligned_uint32_t *)&ipv6_hdr->src_addr;
+	unaligned_uint32_t *word_dst_addr = (unaligned_uint32_t *)&ipv6_hdr->dst_addr;
 
 	return (word_src_addr[0] ^ word_dst_addr[0]) ^
 			(word_src_addr[1] ^ word_dst_addr[1]) ^
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index 40d21e694409..b6d169097c1a 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -411,15 +411,15 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
 			      RTE_IPV6_HDR_TC_SHIFT,
 			      tos);
 
-	if (memcmp(val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
+	if (memcmp(&val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
 	    (umask &&
-	     memcmp(umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
+	     memcmp(&umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
 		CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
 				     lip);
 
-	if (memcmp(val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
+	if (memcmp(&val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
 	    (umask &&
-	     memcmp(umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
+	     memcmp(&umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
 		CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
 				     fip);
 
@@ -918,10 +918,8 @@ static struct chrte_fparse parseitem[] = {
 		.fptr  = ch_rte_parsetype_ipv6,
 		.dmask = &(const struct rte_flow_item_ipv6) {
 			.hdr = {
-				.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-				.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+				.src_addr = RTE_IPV6_MASK_FULL,
+				.dst_addr = RTE_IPV6_MASK_FULL,
 				.vtc_flow = RTE_BE32(0xff000000),
 			},
 		},
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index 1b55d8dd173b..54b17e97c031 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -117,10 +117,8 @@ static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
 
 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
 	.hdr = {
-		.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-		.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.src_addr = RTE_IPV6_MASK_FULL,
+		.dst_addr = RTE_IPV6_MASK_FULL,
 		.proto = 0xff
 	},
 };
@@ -1478,16 +1476,16 @@ dpaa2_configure_flow_generic_ip(
 		mask_ipv4->hdr.dst_addr)) {
 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
 	} else if (mask_ipv6 &&
-		(memcmp((const char *)mask_ipv6->hdr.src_addr,
+		(memcmp(&mask_ipv6->hdr.src_addr,
 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
-		memcmp((const char *)mask_ipv6->hdr.dst_addr,
+		memcmp(&mask_ipv6->hdr.dst_addr,
 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
 	}
 
 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
 		(mask_ipv6 &&
-			memcmp((const char *)mask_ipv6->hdr.src_addr,
+			memcmp(&mask_ipv6->hdr.src_addr,
 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
 		index = dpaa2_flow_extract_search(
 				&priv->extract.qos_key_extract.dpkg,
@@ -1526,13 +1524,13 @@ dpaa2_configure_flow_generic_ip(
 		if (spec_ipv4)
 			key = &spec_ipv4->hdr.src_addr;
 		else
-			key = &spec_ipv6->hdr.src_addr[0];
+			key = &spec_ipv6->hdr.src_addr;
 		if (mask_ipv4) {
 			mask = &mask_ipv4->hdr.src_addr;
 			size = NH_FLD_IPV4_ADDR_SIZE;
 			prot = NET_PROT_IPV4;
 		} else {
-			mask = &mask_ipv6->hdr.src_addr[0];
+			mask = &mask_ipv6->hdr.src_addr;
 			size = NH_FLD_IPV6_ADDR_SIZE;
 			prot = NET_PROT_IPV6;
 		}
@@ -1569,7 +1567,7 @@ dpaa2_configure_flow_generic_ip(
 
 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
 		(mask_ipv6 &&
-			memcmp((const char *)mask_ipv6->hdr.dst_addr,
+			memcmp(&mask_ipv6->hdr.dst_addr,
 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
 		index = dpaa2_flow_extract_search(
 				&priv->extract.qos_key_extract.dpkg,
@@ -1616,13 +1614,13 @@ dpaa2_configure_flow_generic_ip(
 		if (spec_ipv4)
 			key = &spec_ipv4->hdr.dst_addr;
 		else
-			key = spec_ipv6->hdr.dst_addr;
+			key = &spec_ipv6->hdr.dst_addr;
 		if (mask_ipv4) {
 			mask = &mask_ipv4->hdr.dst_addr;
 			size = NH_FLD_IPV4_ADDR_SIZE;
 			prot = NET_PROT_IPV4;
 		} else {
-			mask = &mask_ipv6->hdr.dst_addr[0];
+			mask = &mask_ipv6->hdr.dst_addr;
 			size = NH_FLD_IPV6_ADDR_SIZE;
 			prot = NET_PROT_IPV6;
 		}
diff --git a/drivers/net/hinic/hinic_pmd_flow.c b/drivers/net/hinic/hinic_pmd_flow.c
index d1a564a16303..8fdd5a35be9f 100644
--- a/drivers/net/hinic/hinic_pmd_flow.c
+++ b/drivers/net/hinic/hinic_pmd_flow.c
@@ -962,7 +962,7 @@ static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
 
 		/* check ipv6 src addr mask, ipv6 src addr is 16 bytes */
 		for (i = 0; i < 16; i++) {
-			if (ipv6_mask->hdr.src_addr[i] == UINT8_MAX) {
+			if (ipv6_mask->hdr.src_addr.a[i] == UINT8_MAX) {
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM, item,
 					"Not supported by fdir filter, do not support src ipv6");
@@ -978,13 +978,13 @@ static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
 		}
 
 		for (i = 0; i < 16; i++) {
-			if (ipv6_mask->hdr.dst_addr[i] == UINT8_MAX)
+			if (ipv6_mask->hdr.dst_addr.a[i] == UINT8_MAX)
 				rule->mask.dst_ipv6_mask |= 1 << i;
 		}
 
 		ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
 		rte_memcpy(rule->hinic_fdir.dst_ipv6,
-			   ipv6_spec->hdr.dst_addr, 16);
+			   &ipv6_spec->hdr.dst_addr, 16);
 
 		/*
 		 * Check if the next not void item is TCP or UDP or ICMP.
diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c
index f09b1a6e1ea6..22fb0bffafcc 100644
--- a/drivers/net/hinic/hinic_pmd_tx.c
+++ b/drivers/net/hinic/hinic_pmd_tx.c
@@ -743,7 +743,7 @@ hinic_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
 	else
 		psd_hdr.len = ipv6_hdr->payload_len;
 
-	sum = __rte_raw_cksum(ipv6_hdr->src_addr,
+	sum = __rte_raw_cksum(&ipv6_hdr->src_addr,
 		sizeof(ipv6_hdr->src_addr) + sizeof(ipv6_hdr->dst_addr), 0);
 	sum = __rte_raw_cksum(&psd_hdr, sizeof(psd_hdr), sum);
 	return __rte_raw_cksum_reduce(sum);
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 37eb2b4c3807..bf1eee506dde 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -822,10 +822,10 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 						  "Only support src & dst ip,proto in IPV6");
 		}
 		net_addr_to_host(rule->key_conf.mask.src_ip,
-				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
+				 (const rte_be32_t *)&ipv6_mask->hdr.src_addr,
 				 IP_ADDR_LEN);
 		net_addr_to_host(rule->key_conf.mask.dst_ip,
-				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
+				 (const rte_be32_t *)&ipv6_mask->hdr.dst_addr,
 				 IP_ADDR_LEN);
 		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
 		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
@@ -838,10 +838,10 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
 	ipv6_spec = item->spec;
 	net_addr_to_host(rule->key_conf.spec.src_ip,
-			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
+			 (const rte_be32_t *)&ipv6_spec->hdr.src_addr,
 			 IP_ADDR_LEN);
 	net_addr_to_host(rule->key_conf.spec.dst_ip,
-			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
+			 (const rte_be32_t *)&ipv6_spec->hdr.dst_addr,
 			 IP_ADDR_LEN);
 	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 92165c8422d5..c6857727e8be 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1953,13 +1953,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					return -rte_errno;
 				}
 
-				if (!memcmp(ipv6_mask->hdr.src_addr,
+				if (!memcmp(&ipv6_mask->hdr.src_addr,
 					    ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					    sizeof(ipv6_mask->hdr.src_addr)))
 					input_set |= I40E_INSET_IPV6_SRC;
-				if (!memcmp(ipv6_mask->hdr.dst_addr,
+				if (!memcmp(&ipv6_mask->hdr.dst_addr,
 					    ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					    sizeof(ipv6_mask->hdr.dst_addr)))
 					input_set |= I40E_INSET_IPV6_DST;
 
 				if ((ipv6_mask->hdr.vtc_flow &
@@ -1987,9 +1987,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					I40E_FDIR_IPTYPE_IPV6;
 
 				rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
-					   ipv6_spec->hdr.src_addr, 16);
+					   &ipv6_spec->hdr.src_addr, 16);
 				rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
-					   ipv6_spec->hdr.dst_addr, 16);
+					   &ipv6_spec->hdr.dst_addr, 16);
 
 				/* Check if it is fragment. */
 				if (ipv6_spec->hdr.proto ==
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 811a10287b70..321346425465 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -1048,14 +1048,14 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 								 HOP_LIMIT);
 			}
 
-			if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
-				    RTE_DIM(ipv6_mask->hdr.src_addr))) {
+			if (!memcmp(&ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+				    sizeof(ipv6_mask->hdr.src_addr))) {
 				input_set |= IAVF_INSET_IPV6_SRC;
 				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
 								 SRC);
 			}
-			if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
-				    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+			if (!memcmp(&ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+				    sizeof(ipv6_mask->hdr.dst_addr))) {
 				input_set |= IAVF_INSET_IPV6_DST;
 				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
 								 DST);
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 74e1e7099b8c..eb5a3feab189 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -354,23 +354,23 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 				}
 
 				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.src_addr[j]) {
+					if (ipv6_mask->hdr.src_addr.a[j]) {
 						*input |= IAVF_INSET_IPV6_SRC;
 						break;
 					}
 				}
 				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.dst_addr[j]) {
+					if (ipv6_mask->hdr.dst_addr.a[j]) {
 						*input |= IAVF_INSET_IPV6_DST;
 						break;
 					}
 				}
 
 				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.src_addr[j])
+					if (ipv6_mask->hdr.src_addr.a[j])
 						input_set_byte++;
 
-					if (ipv6_mask->hdr.dst_addr[j])
+					if (ipv6_mask->hdr.dst_addr.a[j])
 						input_set_byte++;
 				}
 
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
index 6fd45ff45f3d..89dd5af5500f 100644
--- a/drivers/net/iavf/iavf_ipsec_crypto.c
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -1738,8 +1738,8 @@ static void
 parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
 		struct rte_ipv6_hdr *ipv6)
 {
-	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
-	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+	ipv6->src_addr = item->hdr.src_addr;
+	ipv6->dst_addr = item->hdr.dst_addr;
 }
 
 static void
@@ -1904,7 +1904,7 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad,
 			ipsec_flow->spi,
 			0,
 			0,
-			ipsec_flow->ipv6_hdr.dst_addr,
+			ipsec_flow->ipv6_hdr.dst_addr.a,
 			0,
 			ipsec_flow->is_udp,
 			ipsec_flow->udp_hdr.dst_port);
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 741107f93939..406918fed547 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -2097,11 +2097,11 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 				return -rte_errno;
 			}
 
-			if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
-				    RTE_DIM(ipv6_mask->hdr.src_addr)))
+			if (!memcmp(&ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+				    sizeof(ipv6_mask->hdr.src_addr)))
 				*input_set |= ICE_INSET_IPV6_SRC;
-			if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
-				    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+			if (!memcmp(&ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+				    sizeof(ipv6_mask->hdr.dst_addr)))
 				*input_set |= ICE_INSET_IPV6_DST;
 
 			if ((ipv6_mask->hdr.vtc_flow &
@@ -2113,8 +2113,8 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
 				*input_set |= ICE_INSET_IPV6_HOP_LIMIT;
 
-			rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
-			rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
+			rte_memcpy(&p_v6->dst_ip, &ipv6_spec->hdr.dst_addr, 16);
+			rte_memcpy(&p_v6->src_ip, &ipv6_spec->hdr.src_addr, 16);
 			vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
 			p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
 			p_v6->proto = ipv6_spec->hdr.proto;
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 122b87f625a7..28bc775a2c34 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -665,13 +665,13 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
 				}
 
 				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.src_addr[j]) {
+					if (ipv6_mask->hdr.src_addr.a[j]) {
 						*input |= ICE_INSET_IPV6_SRC;
 						break;
 					}
 				}
 				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.dst_addr[j]) {
+					if (ipv6_mask->hdr.dst_addr.a[j]) {
 						*input |= ICE_INSET_IPV6_DST;
 						break;
 					}
@@ -691,18 +691,18 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
 				f = &list[t].h_u.ipv6_hdr;
 				s = &list[t].m_u.ipv6_hdr;
 				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.src_addr[j]) {
+					if (ipv6_mask->hdr.src_addr.a[j]) {
 						f->src_addr[j] =
-						ipv6_spec->hdr.src_addr[j];
+						ipv6_spec->hdr.src_addr.a[j];
 						s->src_addr[j] =
-						ipv6_mask->hdr.src_addr[j];
+						ipv6_mask->hdr.src_addr.a[j];
 						input_set_byte++;
 					}
-					if (ipv6_mask->hdr.dst_addr[j]) {
+					if (ipv6_mask->hdr.dst_addr.a[j]) {
 						f->dst_addr[j] =
-						ipv6_spec->hdr.dst_addr[j];
+						ipv6_spec->hdr.dst_addr.a[j];
 						s->dst_addr[j] =
-						ipv6_mask->hdr.dst_addr[j];
+						ipv6_mask->hdr.dst_addr.a[j];
 						input_set_byte++;
 					}
 				}
diff --git a/drivers/net/igc/igc_flow.c b/drivers/net/igc/igc_flow.c
index b677a0d61340..b778ac26135a 100644
--- a/drivers/net/igc/igc_flow.c
+++ b/drivers/net/igc/igc_flow.c
@@ -435,8 +435,8 @@ igc_parse_pattern_ipv6(const struct rte_flow_item *item,
 	if (mask->hdr.vtc_flow ||
 		mask->hdr.payload_len ||
 		mask->hdr.hop_limits ||
-		!igc_is_zero_ipv6_addr(mask->hdr.src_addr) ||
-		!igc_is_zero_ipv6_addr(mask->hdr.dst_addr))
+		!igc_is_zero_ipv6_addr(&mask->hdr.src_addr) ||
+		!igc_is_zero_ipv6_addr(&mask->hdr.dst_addr))
 		return rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM, item,
 				"IPv6 only support protocol");
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 687341c6b8d3..1b35ed5faabe 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1917,9 +1917,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
 		/* check src addr mask */
 		for (j = 0; j < 16; j++) {
-			if (ipv6_mask->hdr.src_addr[j] == 0) {
+			if (ipv6_mask->hdr.src_addr.a[j] == 0) {
 				rule->mask.src_ipv6_mask &= ~(1 << j);
-			} else if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX) {
+			} else if (ipv6_mask->hdr.src_addr.a[j] != UINT8_MAX) {
 				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1930,9 +1930,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
 		/* check dst addr mask */
 		for (j = 0; j < 16; j++) {
-			if (ipv6_mask->hdr.dst_addr[j] == 0) {
+			if (ipv6_mask->hdr.dst_addr.a[j] == 0) {
 				rule->mask.dst_ipv6_mask &= ~(1 << j);
-			} else if (ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+			} else if (ipv6_mask->hdr.dst_addr.a[j] != UINT8_MAX) {
 				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1945,9 +1945,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 			rule->b_spec = TRUE;
 			ipv6_spec = item->spec;
 			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
-				   ipv6_spec->hdr.src_addr, 16);
+				   &ipv6_spec->hdr.src_addr, 16);
 			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
-				   ipv6_spec->hdr.dst_addr, 16);
+				   &ipv6_spec->hdr.dst_addr, 16);
 		}
 
 		/**
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 3a666ba15f59..778004cbe4d2 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -681,9 +681,9 @@ ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
 			ic_session->src_ip.type = IPv6;
 			ic_session->dst_ip.type = IPv6;
 			rte_memcpy(ic_session->src_ip.ipv6,
-				   ipv6->hdr.src_addr, 16);
+				   &ipv6->hdr.src_addr, 16);
 			rte_memcpy(ic_session->dst_ip.ipv6,
-				   ipv6->hdr.dst_addr, 16);
+				   &ipv6->hdr.dst_addr, 16);
 		} else {
 			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
 			ic_session->src_ip.type = IPv4;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 10b986d66bd7..a9fa5d06edcc 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -176,14 +176,14 @@ struct mlx5dr_definer_conv_data {
 	X(SET,		ipv6_proto,		v->hdr.proto,		rte_flow_item_ipv6) \
 	X(SET,		ipv6_routing_hdr,	IPPROTO_ROUTING,	rte_flow_item_ipv6) \
 	X(SET,		ipv6_hop_limits,	v->hdr.hop_limits,	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_src_addr_127_96,	&v->hdr.src_addr[0],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_src_addr_95_64,	&v->hdr.src_addr[4],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_src_addr_63_32,	&v->hdr.src_addr[8],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_src_addr_31_0,	&v->hdr.src_addr[12],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_dst_addr_127_96,	&v->hdr.dst_addr[0],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_dst_addr_95_64,	&v->hdr.dst_addr[4],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_dst_addr_63_32,	&v->hdr.dst_addr[8],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_dst_addr_31_0,	&v->hdr.dst_addr[12],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_src_addr_127_96,	&v->hdr.src_addr.a[0],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_src_addr_95_64,	&v->hdr.src_addr.a[4],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_src_addr_63_32,	&v->hdr.src_addr.a[8],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_src_addr_31_0,	&v->hdr.src_addr.a[12],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_dst_addr_127_96,	&v->hdr.dst_addr.a[0],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_dst_addr_95_64,	&v->hdr.dst_addr.a[4],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_dst_addr_63_32,	&v->hdr.dst_addr.a[8],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_dst_addr_31_0,	&v->hdr.dst_addr.a[12],	rte_flow_item_ipv6) \
 	X(SET,		ipv6_version,		STE_IPV6,		rte_flow_item_ipv6) \
 	X(SET,		ipv6_frag,		v->has_frag_ext,	rte_flow_item_ipv6) \
 	X(SET,		icmp_protocol,		STE_ICMP,		rte_flow_item_icmp) \
@@ -1161,8 +1161,8 @@ mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
 	    m->has_esp_ext || m->has_dest_ext || m->has_mobil_ext ||
 	    m->has_hip_ext || m->has_shim6_ext ||
 	    (l && (l->has_frag_ext || l->hdr.vtc_flow || l->hdr.proto ||
-		   !is_mem_zero(l->hdr.src_addr, 16) ||
-		   !is_mem_zero(l->hdr.dst_addr, 16)))) {
+		   !is_mem_zero(l->hdr.src_addr.a, 16) ||
+		   !is_mem_zero(l->hdr.dst_addr.a, 16)))) {
 		rte_errno = ENOTSUP;
 		return rte_errno;
 	}
@@ -1219,56 +1219,56 @@ mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
 		DR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.src_addr, 4)) {
+	if (!is_mem_zero(m->hdr.src_addr.a, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_127_96, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_127_96_set;
 		DR_CALC_SET(fc, ipv6_src, ipv6_address_127_96, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.src_addr + 4, 4)) {
+	if (!is_mem_zero(m->hdr.src_addr.a + 4, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_95_64, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_95_64_set;
 		DR_CALC_SET(fc, ipv6_src, ipv6_address_95_64, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.src_addr + 8, 4)) {
+	if (!is_mem_zero(m->hdr.src_addr.a + 8, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_63_32, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_63_32_set;
 		DR_CALC_SET(fc, ipv6_src, ipv6_address_63_32, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.src_addr + 12, 4)) {
+	if (!is_mem_zero(m->hdr.src_addr.a + 12, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_31_0, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_31_0_set;
 		DR_CALC_SET(fc, ipv6_src, ipv6_address_31_0, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.dst_addr, 4)) {
+	if (!is_mem_zero(m->hdr.dst_addr.a, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_127_96, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_127_96_set;
 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_127_96, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.dst_addr + 4, 4)) {
+	if (!is_mem_zero(m->hdr.dst_addr.a + 4, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_95_64, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_95_64_set;
 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_95_64, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.dst_addr + 8, 4)) {
+	if (!is_mem_zero(m->hdr.dst_addr.a + 8, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_63_32, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_63_32_set;
 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_63_32, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.dst_addr + 12, 4)) {
+	if (!is_mem_zero(m->hdr.dst_addr.a + 12, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_31_0, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_31_0_set;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index effc61cdc9da..7f8640b488b8 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2933,10 +2933,8 @@ mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev,
 	const struct rte_flow_item_ipv6 *spec = item->spec;
 	const struct rte_flow_item_ipv6 nic_mask = {
 		.hdr = {
-			.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-			.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+			.src_addr = RTE_IPV6_MASK_FULL,
+			.dst_addr = RTE_IPV6_MASK_FULL,
 			.vtc_flow = RTE_BE32(0xffffffff),
 			.proto = 0xff,
 		},
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 5f71573a86d6..201e215e4bad 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7696,10 +7696,8 @@ const struct rte_flow_item_ipv4 nic_ipv4_mask = {
 
 const struct rte_flow_item_ipv6 nic_ipv6_mask = {
 	.hdr = {
-		.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-		.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.src_addr = RTE_IPV6_MASK_FULL,
+		.dst_addr = RTE_IPV6_MASK_FULL,
 		.vtc_flow = RTE_BE32(0xffffffff),
 		.proto = 0xff,
 		.hop_limits = 0xff,
@@ -9548,10 +9546,8 @@ flow_dv_translate_item_ipv6(void *key, const struct rte_flow_item *item,
 	const struct rte_flow_item_ipv6 *ipv6_v;
 	const struct rte_flow_item_ipv6 nic_mask = {
 		.hdr = {
-			.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-			.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+			.src_addr = RTE_IPV6_MASK_FULL,
+			.dst_addr = RTE_IPV6_MASK_FULL,
 			.vtc_flow = RTE_BE32(0xffffffff),
 			.proto = 0xff,
 			.hop_limits = 0xff,
@@ -9574,11 +9570,11 @@ flow_dv_translate_item_ipv6(void *key, const struct rte_flow_item *item,
 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
 	for (i = 0; i < size; ++i)
-		l24_v[i] = ipv6_m->hdr.dst_addr[i] & ipv6_v->hdr.dst_addr[i];
+		l24_v[i] = ipv6_m->hdr.dst_addr.a[i] & ipv6_v->hdr.dst_addr.a[i];
 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
 	for (i = 0; i < size; ++i)
-		l24_v[i] = ipv6_m->hdr.src_addr[i] & ipv6_v->hdr.src_addr[i];
+		l24_v[i] = ipv6_m->hdr.src_addr.a[i] & ipv6_v->hdr.src_addr.a[i];
 	/* TOS. */
 	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index c5ddd1d40433..0084f819804f 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -8301,10 +8301,8 @@ const struct rte_flow_item_ipv6 hws_nic_ipv6_mask = {
 		.payload_len = RTE_BE16(0xffff),
 		.proto = 0xff,
 		.hop_limits = 0xff,
-		.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-		.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.src_addr = RTE_IPV6_MASK_FULL,
+		.dst_addr = RTE_IPV6_MASK_FULL,
 	},
 	.has_frag_ext = 1,
 };
@@ -14741,10 +14739,10 @@ flow_hw_calc_encap_hash(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			memcpy(data.dst.ipv6_addr,
-			       ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
+			       &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
 			       sizeof(data.dst.ipv6_addr));
 			memcpy(data.src.ipv6_addr,
-			       ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
+			       &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
 			       sizeof(data.src.ipv6_addr));
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 3a4356c0f650..5b4a4eda3bbc 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -600,13 +600,13 @@ flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
 		uint32_t vtc_flow_val;
 		uint32_t vtc_flow_mask;
 
-		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
+		memcpy(&ipv6.val.src_ip, &spec->hdr.src_addr,
 		       RTE_DIM(ipv6.val.src_ip));
-		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
+		memcpy(&ipv6.val.dst_ip, &spec->hdr.dst_addr,
 		       RTE_DIM(ipv6.val.dst_ip));
-		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
+		memcpy(&ipv6.mask.src_ip, &mask->hdr.src_addr,
 		       RTE_DIM(ipv6.mask.src_ip));
-		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
+		memcpy(&ipv6.mask.dst_ip, &mask->hdr.dst_addr,
 		       RTE_DIM(ipv6.mask.dst_ip));
 		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
 		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
diff --git a/drivers/net/mvpp2/mrvl_flow.c b/drivers/net/mvpp2/mrvl_flow.c
index e74a5f83f55b..098523ada653 100644
--- a/drivers/net/mvpp2/mrvl_flow.c
+++ b/drivers/net/mvpp2/mrvl_flow.c
@@ -536,27 +536,23 @@ mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
 	       int parse_dst, struct rte_flow *flow)
 {
 	struct pp2_cls_rule_key_field *key_field;
-	int size = sizeof(spec->hdr.dst_addr);
-	struct in6_addr k, m;
+	struct rte_ipv6_addr k, m;
 
-	memset(&k, 0, sizeof(k));
 	if (parse_dst) {
-		memcpy(k.s6_addr, spec->hdr.dst_addr, size);
-		memcpy(m.s6_addr, mask->hdr.dst_addr, size);
-
+		k = spec->hdr.dst_addr;
+		m = mask->hdr.dst_addr;
 		flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
 			MV_NET_IP6_F_DA;
 	} else {
-		memcpy(k.s6_addr, spec->hdr.src_addr, size);
-		memcpy(m.s6_addr, mask->hdr.src_addr, size);
-
+		k = spec->hdr.src_addr;
+		m = mask->hdr.src_addr;
 		flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
 			MV_NET_IP6_F_SA;
 	}
 
 	key_field = &flow->rule.fields[flow->rule.num_fields];
 	mrvl_alloc_key_mask(key_field);
-	key_field->size = 16;
+	key_field->size = RTE_IPV6_ADDR_SIZE;
 
 	inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
 	inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
diff --git a/drivers/net/nfp/flower/nfp_flower_flow.c b/drivers/net/nfp/flower/nfp_flower_flow.c
index e94c7e22e371..43574afea8ac 100644
--- a/drivers/net/nfp/flower/nfp_flower_flow.c
+++ b/drivers/net/nfp/flower/nfp_flower_flow.c
@@ -2066,18 +2066,18 @@ nfp_flow_merge_ipv6(struct nfp_flow_merge_param *param)
 
 			ipv6_gre_tun->ip_ext.tos = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT;
 			ipv6_gre_tun->ip_ext.ttl = hdr->hop_limits;
-			memcpy(ipv6_gre_tun->ipv6.ipv6_src, hdr->src_addr,
+			memcpy(ipv6_gre_tun->ipv6.ipv6_src, &hdr->src_addr,
 					sizeof(ipv6_gre_tun->ipv6.ipv6_src));
-			memcpy(ipv6_gre_tun->ipv6.ipv6_dst, hdr->dst_addr,
+			memcpy(ipv6_gre_tun->ipv6.ipv6_dst, &hdr->dst_addr,
 					sizeof(ipv6_gre_tun->ipv6.ipv6_dst));
 		} else {
 			ipv6_udp_tun = (struct nfp_flower_ipv6_udp_tun *)(*param->mbuf_off);
 
 			ipv6_udp_tun->ip_ext.tos = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT;
 			ipv6_udp_tun->ip_ext.ttl = hdr->hop_limits;
-			memcpy(ipv6_udp_tun->ipv6.ipv6_src, hdr->src_addr,
+			memcpy(ipv6_udp_tun->ipv6.ipv6_src, &hdr->src_addr,
 					sizeof(ipv6_udp_tun->ipv6.ipv6_src));
-			memcpy(ipv6_udp_tun->ipv6.ipv6_dst, hdr->dst_addr,
+			memcpy(ipv6_udp_tun->ipv6.ipv6_dst, &hdr->dst_addr,
 					sizeof(ipv6_udp_tun->ipv6.ipv6_dst));
 		}
 	} else {
@@ -2100,8 +2100,8 @@ nfp_flow_merge_ipv6(struct nfp_flow_merge_param *param)
 		ipv6->ip_ext.tos   = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT;
 		ipv6->ip_ext.proto = hdr->proto;
 		ipv6->ip_ext.ttl   = hdr->hop_limits;
-		memcpy(ipv6->ipv6_src, hdr->src_addr, sizeof(ipv6->ipv6_src));
-		memcpy(ipv6->ipv6_dst, hdr->dst_addr, sizeof(ipv6->ipv6_dst));
+		memcpy(ipv6->ipv6_src, &hdr->src_addr, sizeof(ipv6->ipv6_src));
+		memcpy(ipv6->ipv6_dst, &hdr->dst_addr, sizeof(ipv6->ipv6_dst));
 
 ipv6_end:
 		*param->mbuf_off += sizeof(struct nfp_flower_ipv6);
@@ -2557,10 +2557,8 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 				.vtc_flow   = RTE_BE32(0x0ff00000),
 				.proto      = 0xff,
 				.hop_limits = 0xff,
-				.src_addr   = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-						0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-				.dst_addr   = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-						0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+				.src_addr   = RTE_IPV6_MASK_FULL,
+				.dst_addr   = RTE_IPV6_MASK_FULL,
 			},
 			.has_frag_ext = 1,
 		},
@@ -3363,8 +3361,8 @@ nfp_flower_add_tun_neigh_v6_encap(struct nfp_app_fw_flower *app_fw_flower,
 	struct nfp_flower_cmsg_tun_neigh_v6 payload;
 
 	tun->payload.v6_flag = 1;
-	memcpy(tun->payload.dst.dst_ipv6, ipv6->hdr.dst_addr, sizeof(tun->payload.dst.dst_ipv6));
-	memcpy(tun->payload.src.src_ipv6, ipv6->hdr.src_addr, sizeof(tun->payload.src.src_ipv6));
+	memcpy(tun->payload.dst.dst_ipv6, &ipv6->hdr.dst_addr, sizeof(tun->payload.dst.dst_ipv6));
+	memcpy(tun->payload.src.src_ipv6, &ipv6->hdr.src_addr, sizeof(tun->payload.src.src_ipv6));
 	memcpy(tun->payload.dst_addr, eth->dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
 	memcpy(tun->payload.src_addr, eth->src_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
 
@@ -3384,8 +3382,8 @@ nfp_flower_add_tun_neigh_v6_encap(struct nfp_app_fw_flower *app_fw_flower,
 			sizeof(struct nfp_flower_meta_tci));
 
 	memset(&payload, 0, sizeof(struct nfp_flower_cmsg_tun_neigh_v6));
-	memcpy(payload.dst_ipv6, ipv6->hdr.dst_addr, sizeof(payload.dst_ipv6));
-	memcpy(payload.src_ipv6, ipv6->hdr.src_addr, sizeof(payload.src_ipv6));
+	memcpy(payload.dst_ipv6, &ipv6->hdr.dst_addr, sizeof(payload.dst_ipv6));
+	memcpy(payload.src_ipv6, &ipv6->hdr.src_addr, sizeof(payload.src_ipv6));
 	memcpy(payload.common.dst_mac, eth->dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
 	memcpy(payload.common.src_mac, eth->src_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
 	payload.common.port_id = port->in_port;
@@ -3612,7 +3610,7 @@ nfp_flow_action_vxlan_encap_v6(struct nfp_app_fw_flower *app_fw_flower,
 
 	pre_tun = (struct nfp_fl_act_pre_tun *)actions;
 	memset(pre_tun, 0, act_pre_size);
-	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr);
+	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr.a);
 
 	set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size);
 	memset(set_tun, 0, act_set_size);
@@ -3982,7 +3980,7 @@ nfp_flow_action_geneve_encap_v6(struct nfp_app_fw_flower *app_fw_flower,
 
 	pre_tun = (struct nfp_fl_act_pre_tun *)actions;
 	memset(pre_tun, 0, act_pre_size);
-	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr);
+	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr.a);
 
 	set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size);
 	memset(set_tun, 0, act_set_size);
@@ -4059,7 +4057,7 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower,
 
 	pre_tun = (struct nfp_fl_act_pre_tun *)actions;
 	memset(pre_tun, 0, act_pre_size);
-	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr);
+	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr.a);
 
 	set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size);
 	memset(set_tun, 0, act_set_size);
diff --git a/drivers/net/nfp/nfp_net_flow.c b/drivers/net/nfp/nfp_net_flow.c
index e9f0ce37109a..d72f6ce84c44 100644
--- a/drivers/net/nfp/nfp_net_flow.c
+++ b/drivers/net/nfp/nfp_net_flow.c
@@ -297,28 +297,28 @@ nfp_net_flow_merge_ipv6(struct rte_flow *nfp_flow,
 
 	ipv6->l4_protocol_mask = mask->hdr.proto;
 	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
-		ipv6->src_ipv6_mask[i] = mask->hdr.src_addr[i + 3];
-		ipv6->src_ipv6_mask[i + 1] = mask->hdr.src_addr[i + 2];
-		ipv6->src_ipv6_mask[i + 2] = mask->hdr.src_addr[i + 1];
-		ipv6->src_ipv6_mask[i + 3] = mask->hdr.src_addr[i];
+		ipv6->src_ipv6_mask[i] = mask->hdr.src_addr.a[i + 3];
+		ipv6->src_ipv6_mask[i + 1] = mask->hdr.src_addr.a[i + 2];
+		ipv6->src_ipv6_mask[i + 2] = mask->hdr.src_addr.a[i + 1];
+		ipv6->src_ipv6_mask[i + 3] = mask->hdr.src_addr.a[i];
 
-		ipv6->dst_ipv6_mask[i] = mask->hdr.dst_addr[i + 3];
-		ipv6->dst_ipv6_mask[i + 1] = mask->hdr.dst_addr[i + 2];
-		ipv6->dst_ipv6_mask[i + 2] = mask->hdr.dst_addr[i + 1];
-		ipv6->dst_ipv6_mask[i + 3] = mask->hdr.dst_addr[i];
+		ipv6->dst_ipv6_mask[i] = mask->hdr.dst_addr.a[i + 3];
+		ipv6->dst_ipv6_mask[i + 1] = mask->hdr.dst_addr.a[i + 2];
+		ipv6->dst_ipv6_mask[i + 2] = mask->hdr.dst_addr.a[i + 1];
+		ipv6->dst_ipv6_mask[i + 3] = mask->hdr.dst_addr.a[i];
 	}
 
 	ipv6->l4_protocol = spec->hdr.proto;
 	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
-		ipv6->src_ipv6[i] = spec->hdr.src_addr[i + 3];
-		ipv6->src_ipv6[i + 1] = spec->hdr.src_addr[i + 2];
-		ipv6->src_ipv6[i + 2] = spec->hdr.src_addr[i + 1];
-		ipv6->src_ipv6[i + 3] = spec->hdr.src_addr[i];
+		ipv6->src_ipv6[i] = spec->hdr.src_addr.a[i + 3];
+		ipv6->src_ipv6[i + 1] = spec->hdr.src_addr.a[i + 2];
+		ipv6->src_ipv6[i + 2] = spec->hdr.src_addr.a[i + 1];
+		ipv6->src_ipv6[i + 3] = spec->hdr.src_addr.a[i];
 
-		ipv6->dst_ipv6[i] = spec->hdr.dst_addr[i + 3];
-		ipv6->dst_ipv6[i + 1] = spec->hdr.dst_addr[i + 2];
-		ipv6->dst_ipv6[i + 2] = spec->hdr.dst_addr[i + 1];
-		ipv6->dst_ipv6[i + 3] = spec->hdr.dst_addr[i];
+		ipv6->dst_ipv6[i] = spec->hdr.dst_addr.a[i + 3];
+		ipv6->dst_ipv6[i + 1] = spec->hdr.dst_addr.a[i + 2];
+		ipv6->dst_ipv6[i + 2] = spec->hdr.dst_addr.a[i + 1];
+		ipv6->dst_ipv6[i + 3] = spec->hdr.dst_addr.a[i];
 	}
 
 	return 0;
@@ -406,10 +406,8 @@ static const struct nfp_net_flow_item_proc nfp_net_flow_item_proc_list[] = {
 		.mask_support = &(const struct rte_flow_item_ipv6){
 			.hdr = {
 				.proto    = 0xff,
-				.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-				.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+				.src_addr = RTE_IPV6_MASK_FULL,
+				.dst_addr = RTE_IPV6_MASK_FULL,
 			},
 		},
 		.mask_default = &rte_flow_item_ipv6_mask,
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index d98266eac55c..14fb4338e9c7 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -794,9 +794,9 @@ qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
 
 				spec = pattern->spec;
 				memcpy(flow->entry.tuple.src_ipv6,
-				       spec->hdr.src_addr, IPV6_ADDR_LEN);
+				       &spec->hdr.src_addr, IPV6_ADDR_LEN);
 				memcpy(flow->entry.tuple.dst_ipv6,
-				       spec->hdr.dst_addr, IPV6_ADDR_LEN);
+				       &spec->hdr.dst_addr, IPV6_ADDR_LEN);
 				flow->entry.tuple.eth_proto =
 					RTE_ETHER_TYPE_IPV6;
 			}
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 1b50aefe5c48..1006243539b5 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -575,14 +575,8 @@ sfc_flow_parse_ipv6(const struct rte_flow_item *item,
 	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
 	const struct rte_flow_item_ipv6 supp_mask = {
 		.hdr = {
-			.src_addr = { 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff },
-			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff },
+			.src_addr = RTE_IPV6_MASK_FULL,
+			.dst_addr = RTE_IPV6_MASK_FULL,
 			.proto = 0xff,
 		}
 	};
@@ -618,28 +612,28 @@ sfc_flow_parse_ipv6(const struct rte_flow_item *item,
 	 * IPv6 addresses are in big-endian byte order in item and in
 	 * efx_spec
 	 */
-	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
+	if (memcmp(&mask->hdr.src_addr, &supp_mask.hdr.src_addr,
 		   sizeof(mask->hdr.src_addr)) == 0) {
 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
 
 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
 				 sizeof(spec->hdr.src_addr));
-		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
+		rte_memcpy(&efx_spec->efs_rem_host, &spec->hdr.src_addr,
 			   sizeof(efx_spec->efs_rem_host));
-	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
+	} else if (!sfc_flow_is_zero(mask->hdr.src_addr.a,
 				     sizeof(mask->hdr.src_addr))) {
 		goto fail_bad_mask;
 	}
 
-	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
+	if (memcmp(&mask->hdr.dst_addr, &supp_mask.hdr.dst_addr,
 		   sizeof(mask->hdr.dst_addr)) == 0) {
 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
 
 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
 				 sizeof(spec->hdr.dst_addr));
-		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
+		rte_memcpy(&efx_spec->efs_loc_host, &spec->hdr.dst_addr,
 			   sizeof(efx_spec->efs_loc_host));
-	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
+	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr.a,
 				     sizeof(mask->hdr.dst_addr))) {
 		goto fail_bad_mask;
 	}
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index 5ae1faf9165d..51ec07eb5acd 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -209,10 +209,8 @@ static const struct tap_flow_items tap_flow_items[] = {
 			       RTE_FLOW_ITEM_TYPE_TCP),
 		.mask = &(const struct rte_flow_item_ipv6){
 			.hdr = {
-				.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-				.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+				.src_addr = RTE_IPV6_MASK_FULL,
+				.dst_addr = RTE_IPV6_MASK_FULL,
 				.proto = -1,
 			},
 		},
@@ -613,13 +611,13 @@ tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
 		info->eth_type = htons(ETH_P_IPV6);
 	if (!spec)
 		return 0;
-	if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
+	if (memcmp(&mask->hdr.dst_addr, empty_addr, 16)) {
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
 			   sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
 			   sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
 	}
-	if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
+	if (memcmp(&mask->hdr.src_addr, empty_addr, 16)) {
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
 			   sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 7ef52d0b0fcd..5d2dd453687c 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1807,9 +1807,9 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
 
 		/* check src addr mask */
 		for (j = 0; j < 16; j++) {
-			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+			if (ipv6_mask->hdr.src_addr.a[j] == UINT8_MAX) {
 				rule->mask.src_ipv6_mask |= 1 << j;
-			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
+			} else if (ipv6_mask->hdr.src_addr.a[j] != 0) {
 				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1820,9 +1820,9 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
 
 		/* check dst addr mask */
 		for (j = 0; j < 16; j++) {
-			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+			if (ipv6_mask->hdr.dst_addr.a[j] == UINT8_MAX) {
 				rule->mask.dst_ipv6_mask |= 1 << j;
-			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+			} else if (ipv6_mask->hdr.dst_addr.a[j] != 0) {
 				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1835,9 +1835,9 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
 			rule->b_spec = TRUE;
 			ipv6_spec = item->spec;
 			rte_memcpy(rule->input.src_ip,
-				   ipv6_spec->hdr.src_addr, 16);
+				   &ipv6_spec->hdr.src_addr, 16);
 			rte_memcpy(rule->input.dst_ip,
-				   ipv6_spec->hdr.dst_addr, 16);
+				   &ipv6_spec->hdr.dst_addr, 16);
 		}
 
 		/**
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index 4af49dd802d0..65b6c251c684 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -659,9 +659,9 @@ txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
 			ic_session->src_ip.type = IPv6;
 			ic_session->dst_ip.type = IPv6;
 			rte_memcpy(ic_session->src_ip.ipv6,
-				   ipv6->hdr.src_addr, 16);
+				   &ipv6->hdr.src_addr, 16);
 			rte_memcpy(ic_session->dst_ip.ipv6,
-				   ipv6->hdr.dst_addr, 16);
+				   &ipv6->hdr.dst_addr, 16);
 		} else {
 			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
 			ic_session->src_ip.type = IPv4;
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 736eae6f05ee..4c0fa5054a2e 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -311,7 +311,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
 		ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv6_hdr *);
 
 		/* Find destination port */
-		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
+		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr.a,
 						&next_hop) == 0 &&
 				(enabled_port_mask & 1 << next_hop) != 0) {
 			port_out = next_hop;
diff --git a/examples/ip_pipeline/pipeline.c b/examples/ip_pipeline/pipeline.c
index 63352257c6e9..792aab0059e9 100644
--- a/examples/ip_pipeline/pipeline.c
+++ b/examples/ip_pipeline/pipeline.c
@@ -637,7 +637,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 1,
 		.input_index = 1,
-		.offset = offsetof(struct rte_ipv6_hdr, src_addr[0]),
+		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[0]),
 	},
 
 	[2] = {
@@ -645,7 +645,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 2,
 		.input_index = 2,
-		.offset = offsetof(struct rte_ipv6_hdr, src_addr[4]),
+		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[4]),
 	},
 
 	[3] = {
@@ -653,7 +653,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 3,
 		.input_index = 3,
-		.offset = offsetof(struct rte_ipv6_hdr, src_addr[8]),
+		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[8]),
 	},
 
 	[4] = {
@@ -661,7 +661,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 4,
 		.input_index = 4,
-		.offset = offsetof(struct rte_ipv6_hdr, src_addr[12]),
+		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[12]),
 	},
 
 	/* Destination IP address (IPv6) */
@@ -670,7 +670,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 5,
 		.input_index = 5,
-		.offset = offsetof(struct rte_ipv6_hdr, dst_addr[0]),
+		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[0]),
 	},
 
 	[6] = {
@@ -678,7 +678,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 6,
 		.input_index = 6,
-		.offset = offsetof(struct rte_ipv6_hdr, dst_addr[4]),
+		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[4]),
 	},
 
 	[7] = {
@@ -686,7 +686,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 7,
 		.input_index = 7,
-		.offset = offsetof(struct rte_ipv6_hdr, dst_addr[8]),
+		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[8]),
 	},
 
 	[8] = {
@@ -694,7 +694,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 8,
 		.input_index = 8,
-		.offset = offsetof(struct rte_ipv6_hdr, dst_addr[12]),
+		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[12]),
 	},
 
 	/* Source Port */
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index c7019078f7b4..4da692eb23e6 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -400,7 +400,7 @@ reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
 		}
 
 		/* Find destination port */
-		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
+		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr.a,
 						&next_hop) == 0 &&
 				(enabled_port_mask & 1 << next_hop) != 0) {
 			dst_port = next_hop;
diff --git a/examples/ipsec-secgw/flow.c b/examples/ipsec-secgw/flow.c
index 05a62c3020fa..3f7630f5fd53 100644
--- a/examples/ipsec-secgw/flow.c
+++ b/examples/ipsec-secgw/flow.c
@@ -83,29 +83,8 @@ ipv4_addr_cpy(rte_be32_t *spec, rte_be32_t *mask, char *token,
 static void
 ipv6_hdr_print(struct rte_ipv6_hdr *hdr)
 {
-	uint8_t *addr;
-
-	addr = hdr->src_addr;
-	printf("src: %4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx \t",
-	       (uint16_t)((addr[0] << 8) | addr[1]),
-	       (uint16_t)((addr[2] << 8) | addr[3]),
-	       (uint16_t)((addr[4] << 8) | addr[5]),
-	       (uint16_t)((addr[6] << 8) | addr[7]),
-	       (uint16_t)((addr[8] << 8) | addr[9]),
-	       (uint16_t)((addr[10] << 8) | addr[11]),
-	       (uint16_t)((addr[12] << 8) | addr[13]),
-	       (uint16_t)((addr[14] << 8) | addr[15]));
-
-	addr = hdr->dst_addr;
-	printf("dst: %4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx",
-	       (uint16_t)((addr[0] << 8) | addr[1]),
-	       (uint16_t)((addr[2] << 8) | addr[3]),
-	       (uint16_t)((addr[4] << 8) | addr[5]),
-	       (uint16_t)((addr[6] << 8) | addr[7]),
-	       (uint16_t)((addr[8] << 8) | addr[9]),
-	       (uint16_t)((addr[10] << 8) | addr[11]),
-	       (uint16_t)((addr[12] << 8) | addr[13]),
-	       (uint16_t)((addr[14] << 8) | addr[15]));
+	printf("src: " RTE_IPV6_ADDR_FMT " \t", RTE_IPV6_ADDR_SPLIT(&hdr->src_addr));
+	printf("dst: " RTE_IPV6_ADDR_FMT, RTE_IPV6_ADDR_SPLIT(&hdr->dst_addr));
 }
 
 static int
@@ -196,8 +175,8 @@ parse_flow_tokens(char **tokens, uint32_t n_tokens,
 				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
 				if (status->status < 0)
 					return;
-				if (ipv6_addr_cpy(rule->ipv6.spec.hdr.src_addr,
-						  rule->ipv6.mask.hdr.src_addr,
+				if (ipv6_addr_cpy(rule->ipv6.spec.hdr.src_addr.a,
+						  rule->ipv6.mask.hdr.src_addr.a,
 						  tokens[ti], status))
 					return;
 			}
@@ -205,8 +184,8 @@ parse_flow_tokens(char **tokens, uint32_t n_tokens,
 				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
 				if (status->status < 0)
 					return;
-				if (ipv6_addr_cpy(rule->ipv6.spec.hdr.dst_addr,
-						  rule->ipv6.mask.hdr.dst_addr,
+				if (ipv6_addr_cpy(rule->ipv6.spec.hdr.dst_addr.a,
+						  rule->ipv6.mask.hdr.dst_addr.a,
 						  tokens[ti], status))
 					return;
 			}
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index b52b0ffc3d22..ebde28639c12 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -529,9 +529,9 @@ create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
 			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
 			sa->pattern[1].spec = &sa->ipv6_spec;
 
-			memcpy(sa->ipv6_spec.hdr.dst_addr,
+			memcpy(&sa->ipv6_spec.hdr.dst_addr,
 				sa->dst.ip.ip6.ip6_b, 16);
-			memcpy(sa->ipv6_spec.hdr.src_addr,
+			memcpy(&sa->ipv6_spec.hdr.src_addr,
 			       sa->src.ip.ip6.ip6_b, 16);
 		} else if (IS_IP4(sa->flags)) {
 			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
@@ -735,9 +735,9 @@ create_ipsec_esp_flow(struct ipsec_sa *sa)
 		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
 		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
 		sa->pattern[1].spec = &sa->ipv6_spec;
-		memcpy(sa->ipv6_spec.hdr.dst_addr,
+		memcpy(&sa->ipv6_spec.hdr.dst_addr,
 			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
-		memcpy(sa->ipv6_spec.hdr.src_addr,
+		memcpy(&sa->ipv6_spec.hdr.src_addr,
 			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
 		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
 		sa->pattern[2].spec = &sa->esp_spec;
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index c4bac17cd77c..1a0afd2ed2e8 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1571,8 +1571,8 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
 	};
 
 	if (IS_IP6_TUNNEL(lsa->flags)) {
-		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
-		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
+		memcpy(&v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
+		memcpy(&v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
 	}
 
 	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
diff --git a/examples/ipsec-secgw/sad.h b/examples/ipsec-secgw/sad.h
index 3224b6252c8d..fdb1d2ef1790 100644
--- a/examples/ipsec-secgw/sad.h
+++ b/examples/ipsec-secgw/sad.h
@@ -5,6 +5,8 @@
 #ifndef __SAD_H__
 #define __SAD_H__
 
+#include <rte_ip.h>
+#include <rte_ip6.h>
 #include <rte_ipsec_sad.h>
 
 #define SA_CACHE_SZ	128
@@ -37,8 +39,8 @@ cmp_sa_key(struct ipsec_sa *sa, int is_v4, struct rte_ipv4_hdr *ipv4,
 			(sa->dst.ip.ip4 == ipv4->dst_addr)) ||
 			/* IPv6 check */
 			(!is_v4 && (sa_type == IP6_TUNNEL) &&
-			(!memcmp(sa->src.ip.ip6.ip6, ipv6->src_addr, 16)) &&
-			(!memcmp(sa->dst.ip.ip6.ip6, ipv6->dst_addr, 16))))
+			(!memcmp(sa->src.ip.ip6.ip6, &ipv6->src_addr, 16)) &&
+			(!memcmp(sa->dst.ip.ip6.ip6, &ipv6->dst_addr, 16))))
 		return 1;
 
 	return 0;
@@ -128,9 +130,9 @@ sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[],
 				}
 			}
 			v6[nb_v6].spi = esp->spi;
-			memcpy(v6[nb_v6].dip, ipv6->dst_addr,
+			memcpy(v6[nb_v6].dip, &ipv6->dst_addr,
 					sizeof(ipv6->dst_addr));
-			memcpy(v6[nb_v6].sip, ipv6->src_addr,
+			memcpy(v6[nb_v6].sip, &ipv6->src_addr,
 					sizeof(ipv6->src_addr));
 			keys_v6[nb_v6] = (const union rte_ipsec_sad_key *)
 						&v6[nb_v6];
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 993e36cec235..85f862dd5b40 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -65,7 +65,7 @@ fib_parse_packet(struct rte_mbuf *mbuf,
 	/* IPv6 */
 	else {
 		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
-		rte_mov16(ipv6, (const uint8_t *)ipv6_hdr->dst_addr);
+		rte_mov16(ipv6, ipv6_hdr->dst_addr.a);
 		*ip_type = 0;
 		(*ipv6_cnt)++;
 	}
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index e8fd95aae9ce..422fdb70054d 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -62,7 +62,7 @@ lpm_get_ipv6_dst_port(const struct rte_ipv6_hdr *ipv6_hdr,
 		      uint16_t portid,
 		      struct rte_lpm6 *ipv6_l3fwd_lookup_struct)
 {
-	const uint8_t *dst_ip = ipv6_hdr->dst_addr;
+	const uint8_t *dst_ip = ipv6_hdr->dst_addr.a;
 	uint32_t next_hop;
 
 	if (rte_lpm6_lookup(ipv6_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
@@ -122,7 +122,7 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
 
 		return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
-				ipv6_hdr->dst_addr, &next_hop) == 0)
+				ipv6_hdr->dst_addr.a, &next_hop) == 0)
 				? next_hop : portid);
 
 	}
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index 22c5c147d0ea..e8baedcc79d8 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -1005,10 +1005,8 @@ struct rte_flow_item_ipv6 {
 #ifndef __cplusplus
 static const struct rte_flow_item_ipv6 rte_flow_item_ipv6_mask = {
 	.hdr = {
-		.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-		.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.src_addr = RTE_IPV6_MASK_FULL,
+		.dst_addr = RTE_IPV6_MASK_FULL,
 	},
 };
 #endif
diff --git a/lib/hash/rte_thash.h b/lib/hash/rte_thash.h
index ec0c029402fc..eab753a06f3d 100644
--- a/lib/hash/rte_thash.h
+++ b/lib/hash/rte_thash.h
@@ -139,24 +139,24 @@ rte_thash_load_v6_addrs(const struct rte_ipv6_hdr *orig,
 			union rte_thash_tuple *targ)
 {
 #ifdef RTE_ARCH_X86
-	__m128i ipv6 = _mm_loadu_si128((const __m128i *)orig->src_addr);
+	__m128i ipv6 = _mm_loadu_si128((const __m128i *)&orig->src_addr);
 	*(__m128i *)targ->v6.src_addr =
 			_mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
-	ipv6 = _mm_loadu_si128((const __m128i *)orig->dst_addr);
+	ipv6 = _mm_loadu_si128((const __m128i *)&orig->dst_addr);
 	*(__m128i *)targ->v6.dst_addr =
 			_mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
 #elif defined(__ARM_NEON)
-	uint8x16_t ipv6 = vld1q_u8((uint8_t const *)orig->src_addr);
+	uint8x16_t ipv6 = vld1q_u8((uint8_t const *)&orig->src_addr);
 	vst1q_u8((uint8_t *)targ->v6.src_addr, vrev32q_u8(ipv6));
-	ipv6 = vld1q_u8((uint8_t const *)orig->dst_addr);
+	ipv6 = vld1q_u8((uint8_t const *)&orig->dst_addr);
 	vst1q_u8((uint8_t *)targ->v6.dst_addr, vrev32q_u8(ipv6));
 #else
 	int i;
 	for (i = 0; i < 4; i++) {
 		*((uint32_t *)targ->v6.src_addr + i) =
-			rte_be_to_cpu_32(*((const uint32_t *)orig->src_addr + i));
+			rte_be_to_cpu_32(*((const uint32_t *)&orig->src_addr + i));
 		*((uint32_t *)targ->v6.dst_addr + i) =
-			rte_be_to_cpu_32(*((const uint32_t *)orig->dst_addr + i));
+			rte_be_to_cpu_32(*((const uint32_t *)&orig->dst_addr + i));
 	}
 #endif
 }
diff --git a/lib/ip_frag/rte_ipv6_reassembly.c b/lib/ip_frag/rte_ipv6_reassembly.c
index 88863a98d1fe..9471ce5333d7 100644
--- a/lib/ip_frag/rte_ipv6_reassembly.c
+++ b/lib/ip_frag/rte_ipv6_reassembly.c
@@ -143,8 +143,8 @@ rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
 	int32_t ip_len;
 	int32_t trim;
 
-	rte_memcpy(&key.src_dst[0], ip_hdr->src_addr, 16);
-	rte_memcpy(&key.src_dst[2], ip_hdr->dst_addr, 16);
+	rte_memcpy(&key.src_dst[0], &ip_hdr->src_addr, 16);
+	rte_memcpy(&key.src_dst[2], &ip_hdr->dst_addr, 16);
 
 	key.id = frag_hdr->id;
 	key.key_len = IPV6_KEYLEN;
diff --git a/lib/net/rte_ip6.h b/lib/net/rte_ip6.h
index 725402e6d08e..d364295b11d2 100644
--- a/lib/net/rte_ip6.h
+++ b/lib/net/rte_ip6.h
@@ -217,8 +217,8 @@ struct rte_ipv6_hdr {
 	rte_be16_t payload_len;	/**< IP payload size, including ext. headers */
 	uint8_t  proto;		/**< Protocol, next header. */
 	uint8_t  hop_limits;	/**< Hop limits. */
-	uint8_t  src_addr[16];	/**< IP address of source host. */
-	uint8_t  dst_addr[16];	/**< IP address of destination host(s). */
+	struct rte_ipv6_addr src_addr;	/**< IP address of source host. */
+	struct rte_ipv6_addr dst_addr;	/**< IP address of destination host(s). */
 } __rte_packed;
 
 /* IPv6 routing extension type definition. */
@@ -286,7 +286,7 @@ rte_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
 	else
 		psd_hdr.len = ipv6_hdr->payload_len;
 
-	sum = __rte_raw_cksum(ipv6_hdr->src_addr,
+	sum = __rte_raw_cksum(&ipv6_hdr->src_addr,
 		sizeof(ipv6_hdr->src_addr) + sizeof(ipv6_hdr->dst_addr),
 		0);
 	sum = __rte_raw_cksum(&psd_hdr, sizeof(psd_hdr), sum);
diff --git a/lib/node/ip6_lookup.c b/lib/node/ip6_lookup.c
index 309964f60fd6..6bbcf14e2aa8 100644
--- a/lib/node/ip6_lookup.c
+++ b/lib/node/ip6_lookup.c
@@ -112,28 +112,28 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf0, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[0], ipv6_hdr->dst_addr, 16);
+		rte_memcpy(ip_batch[0], &ipv6_hdr->dst_addr, 16);
 
 		/* Extract DIP of mbuf1 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf1, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf1, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[1], ipv6_hdr->dst_addr, 16);
+		rte_memcpy(ip_batch[1], &ipv6_hdr->dst_addr, 16);
 
 		/* Extract DIP of mbuf2 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf2, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf2, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[2], ipv6_hdr->dst_addr, 16);
+		rte_memcpy(ip_batch[2], &ipv6_hdr->dst_addr, 16);
 
 		/* Extract DIP of mbuf3 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf3, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf3, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[3], ipv6_hdr->dst_addr, 16);
+		rte_memcpy(ip_batch[3], &ipv6_hdr->dst_addr, 16);
 
 		rte_lpm6_lookup_bulk_func(lpm6, ip_batch, next_hop, 4);
 
@@ -223,7 +223,7 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
 		/* Extract TTL as IPv6 hdr is in cache */
 		node_mbuf_priv1(mbuf0, dyn)->ttl = ipv6_hdr->hop_limits;
 
-		rc = rte_lpm6_lookup(lpm6, ipv6_hdr->dst_addr, &next_hop);
+		rc = rte_lpm6_lookup(lpm6, ipv6_hdr->dst_addr.a, &next_hop);
 		next_hop = (rc == 0) ? next_hop : drop_nh;
 
 		node_mbuf_priv1(mbuf0, dyn)->nh = (uint16_t)next_hop;
diff --git a/lib/pipeline/rte_swx_ipsec.c b/lib/pipeline/rte_swx_ipsec.c
index 73e8211b2818..0ed0ecd134c8 100644
--- a/lib/pipeline/rte_swx_ipsec.c
+++ b/lib/pipeline/rte_swx_ipsec.c
@@ -1386,13 +1386,11 @@ tunnel_ipv6_header_set(struct rte_ipv6_hdr *h, struct rte_swx_ipsec_sa_params *p
 		.payload_len = 0, /* Cannot be pre-computed. */
 		.proto = IPPROTO_ESP,
 		.hop_limits = 64,
-		.src_addr = {0},
-		.dst_addr = {0},
 	};
 
 	memcpy(h, &ipv6_hdr, sizeof(ipv6_hdr));
-	memcpy(h->src_addr, p->encap.tunnel.ipv6.src_addr.s6_addr, 16);
-	memcpy(h->dst_addr, p->encap.tunnel.ipv6.dst_addr.s6_addr, 16);
+	memcpy(&h->src_addr, p->encap.tunnel.ipv6.src_addr.s6_addr, 16);
+	memcpy(&h->dst_addr, p->encap.tunnel.ipv6.dst_addr.s6_addr, 16);
 }
 
 /* IPsec library SA parameters. */
diff --git a/lib/pipeline/rte_table_action.c b/lib/pipeline/rte_table_action.c
index 87c3e0e2c935..c0be656536eb 100644
--- a/lib/pipeline/rte_table_action.c
+++ b/lib/pipeline/rte_table_action.c
@@ -871,10 +871,10 @@ encap_vxlan_apply(void *data,
 			d->ipv6.payload_len = 0; /* not pre-computed */
 			d->ipv6.proto = IP_PROTO_UDP;
 			d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
-			memcpy(d->ipv6.src_addr,
+			memcpy(&d->ipv6.src_addr,
 				p->vxlan.ipv6.sa,
 				sizeof(p->vxlan.ipv6.sa));
-			memcpy(d->ipv6.dst_addr,
+			memcpy(&d->ipv6.dst_addr,
 				p->vxlan.ipv6.da,
 				sizeof(p->vxlan.ipv6.da));
 
@@ -906,10 +906,10 @@ encap_vxlan_apply(void *data,
 			d->ipv6.payload_len = 0; /* not pre-computed */
 			d->ipv6.proto = IP_PROTO_UDP;
 			d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
-			memcpy(d->ipv6.src_addr,
+			memcpy(&d->ipv6.src_addr,
 				p->vxlan.ipv6.sa,
 				sizeof(p->vxlan.ipv6.sa));
-			memcpy(d->ipv6.dst_addr,
+			memcpy(&d->ipv6.dst_addr,
 				p->vxlan.ipv6.da,
 				sizeof(p->vxlan.ipv6.da));
 
@@ -1436,12 +1436,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
 			uint16_t tcp_cksum;
 
 			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
-				(uint16_t *)ip->src_addr,
+				(uint16_t *)&ip->src_addr,
 				(uint16_t *)data->addr,
 				tcp->src_port,
 				data->port);
 
-			rte_memcpy(ip->src_addr, data->addr, 16);
+			rte_memcpy(&ip->src_addr, data->addr, 16);
 			tcp->src_port = data->port;
 			tcp->cksum = tcp_cksum;
 		} else {
@@ -1449,12 +1449,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
 			uint16_t udp_cksum;
 
 			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
-				(uint16_t *)ip->src_addr,
+				(uint16_t *)&ip->src_addr,
 				(uint16_t *)data->addr,
 				udp->src_port,
 				data->port);
 
-			rte_memcpy(ip->src_addr, data->addr, 16);
+			rte_memcpy(&ip->src_addr, data->addr, 16);
 			udp->src_port = data->port;
 			udp->dgram_cksum = udp_cksum;
 		}
@@ -1464,12 +1464,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
 			uint16_t tcp_cksum;
 
 			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
-				(uint16_t *)ip->dst_addr,
+				(uint16_t *)&ip->dst_addr,
 				(uint16_t *)data->addr,
 				tcp->dst_port,
 				data->port);
 
-			rte_memcpy(ip->dst_addr, data->addr, 16);
+			rte_memcpy(&ip->dst_addr, data->addr, 16);
 			tcp->dst_port = data->port;
 			tcp->cksum = tcp_cksum;
 		} else {
@@ -1477,12 +1477,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
 			uint16_t udp_cksum;
 
 			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
-				(uint16_t *)ip->dst_addr,
+				(uint16_t *)&ip->dst_addr,
 				(uint16_t *)data->addr,
 				udp->dst_port,
 				data->port);
 
-			rte_memcpy(ip->dst_addr, data->addr, 16);
+			rte_memcpy(&ip->dst_addr, data->addr, 16);
 			udp->dst_port = data->port;
 			udp->dgram_cksum = udp_cksum;
 		}
-- 
2.47.0


^ permalink raw reply	[relevance 1%]

* [PATCH dpdk v4 05/17] lpm6: use IPv6 address structure and utils
    2024-10-18  9:17  1%   ` [PATCH dpdk v4 04/17] net: use IPv6 structure for packet headers Robin Jarry
@ 2024-10-18  9:17  1%   ` Robin Jarry
  2024-10-18  9:17  2%   ` [PATCH dpdk v4 07/17] rib6: " Robin Jarry
  2 siblings, 0 replies; 169+ results
From: Robin Jarry @ 2024-10-18  9:17 UTC (permalink / raw)
  To: dev, Vladimir Medvedkin, Cristian Dumitrescu, Bruce Richardson,
	Konstantin Ananyev, Wathsala Vithanage, Radu Nicolau,
	Akhil Goyal, Jerin Jacob, Kiran Kumar K, Nithin Dabilpuram,
	Zhirun Yan, Pavan Nikhilesh

Replace ad-hoc uint8_t[16] array types in the API of rte_lpm6 with
rte_ipv6_addr structures. Replace duplicate functions and macros with
common ones from rte_ip6.h. Update all code accordingly.
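
As a sketch of what this means for callers (signatures paraphrased for
illustration; see the lib/lpm/rte_lpm6.h changes in this patch):

	/* before: addresses passed as raw byte arrays */
	int rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip,
		uint32_t *next_hop);

	/* after: addresses passed as a typed structure */
	int rte_lpm6_lookup(const struct rte_lpm6 *lpm,
		const struct rte_ipv6_addr *ip, uint32_t *next_hop);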

NB: the conversion between 16-byte arrays and RTE_IPV6() literals was
done automatically with the following Python script and adjusted
manually afterwards:

import argparse
import re
import struct

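# match a brace-enclosed initializer list of 16 byte values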
ip = re.compile(
    r"""
    \{
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*
    \}
    """,
    re.VERBOSE,
)

def repl(match):
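    # pack the 16 matched bytes, then regroup them as 8 big-endian 16-bit words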
    u8 = bytes(int(g, 0) for g in match.groups("0"))
    nums = []
    for u16 in struct.unpack("!HHHHHHHH", u8):
        if u16:
            nums.append(f"0x{u16:04x}")
        else:
            nums.append("0")
    return f"RTE_IPV6({', '.join(nums)})"

p = argparse.ArgumentParser()
p.add_argument("args", nargs="+")
args = p.parse_args()

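# rewrite each input file in place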
for a in args.args:

    with open(a) as f:
        buf = f.read()

    buf = ip.sub(repl, buf)
    with open(a, "w") as f:
        f.write(buf)
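
For example, a minimal before/after of the rewrite the script performs
(hypothetical initializer, shown for illustration only):

	/* before: ad-hoc 16-byte array */
	{ 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 }

	/* after: eight big-endian 16-bit groups */
	RTE_IPV6(0x2001, 0x0db8, 0, 0, 0, 0, 0, 0x0001)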

Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
 app/test-fib/main.c                    |   74 +-
 app/test-pipeline/pipeline_lpm_ipv6.c  |   11 +-
 app/test/test_fib6_perf.c              |    6 +-
 app/test/test_lpm6.c                   |  490 +++---
 app/test/test_lpm6_data.h              | 2025 ++++++++++++------------
 app/test/test_lpm6_perf.c              |   10 +-
 app/test/test_table_combined.c         |    2 +-
 app/test/test_table_tables.c           |    8 +-
 doc/guides/rel_notes/deprecation.rst   |    9 -
 doc/guides/rel_notes/release_24_11.rst |    9 +
 examples/ip_fragmentation/main.c       |   23 +-
 examples/ip_pipeline/thread.c          |    2 +-
 examples/ip_reassembly/main.c          |   23 +-
 examples/ipsec-secgw/ipsec_lpm_neon.h  |    7 +-
 examples/ipsec-secgw/ipsec_worker.c    |   11 +-
 examples/ipsec-secgw/ipsec_worker.h    |    4 +-
 examples/ipsec-secgw/rt.c              |   22 +-
 examples/l3fwd-graph/main.c            |    4 +-
 examples/l3fwd/l3fwd_fib.c             |    4 +-
 examples/l3fwd/l3fwd_lpm.c             |    8 +-
 examples/l3fwd/l3fwd_route.h           |    9 +-
 examples/l3fwd/lpm_route_parse.c       |    9 +-
 examples/l3fwd/main.c                  |   32 +-
 lib/lpm/meson.build                    |    1 +
 lib/lpm/rte_lpm6.c                     |  148 +-
 lib/lpm/rte_lpm6.h                     |   19 +-
 lib/node/ip6_lookup.c                  |   18 +-
 lib/table/rte_table_lpm_ipv6.c         |   12 +-
 lib/table/rte_table_lpm_ipv6.h         |    7 +-
 29 files changed, 1463 insertions(+), 1544 deletions(-)

diff --git a/app/test-fib/main.c b/app/test-fib/main.c
index c49bfe8bcec3..9f45d03d81fb 100644
--- a/app/test-fib/main.c
+++ b/app/test-fib/main.c
@@ -62,25 +62,6 @@ enum {
 	(unsigned)((unsigned char *)&addr)[2],	\
 	(unsigned)((unsigned char *)&addr)[1],	\
 	(unsigned)((unsigned char *)&addr)[0]
-
-#define NIPQUAD6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
-#define NIPQUAD6(addr)				\
-	((uint8_t *)addr)[0] << 8 |	\
-	((uint8_t *)addr)[1],		\
-	((uint8_t *)addr)[2] << 8 |	\
-	((uint8_t *)addr)[3],		\
-	((uint8_t *)addr)[4] << 8 |	\
-	((uint8_t *)addr)[5],		\
-	((uint8_t *)addr)[6] << 8 |	\
-	((uint8_t *)addr)[7],		\
-	((uint8_t *)addr)[8] << 8 |	\
-	((uint8_t *)addr)[9],		\
-	((uint8_t *)addr)[10] << 8 |	\
-	((uint8_t *)addr)[11],		\
-	((uint8_t *)addr)[12] << 8 |	\
-	((uint8_t *)addr)[13],		\
-	((uint8_t *)addr)[14] << 8 |	\
-	((uint8_t *)addr)[15]
 #endif
 
 static struct {
@@ -123,7 +104,7 @@ struct rt_rule_4 {
 };
 
 struct rt_rule_6 {
-	uint8_t		addr[16];
+	struct rte_ipv6_addr addr;
 	uint8_t		depth;
 	uint64_t	nh;
 };
@@ -306,15 +287,15 @@ shuffle_rt_6(struct rt_rule_6 *rt, int n)
 
 	for (i = 0; i < n; i++) {
 		j = rte_rand() % n;
-		memcpy(tmp.addr, rt[i].addr, 16);
+		tmp.addr = rt[i].addr;
 		tmp.depth = rt[i].depth;
 		tmp.nh = rt[i].nh;
 
-		memcpy(rt[i].addr, rt[j].addr, 16);
+		rt[i].addr = rt[j].addr;
 		rt[i].depth = rt[j].depth;
 		rt[i].nh = rt[j].nh;
 
-		memcpy(rt[j].addr, tmp.addr, 16);
+		rt[j].addr = tmp.addr;
 		rt[j].depth = tmp.depth;
 		rt[j].nh = tmp.nh;
 	}
@@ -364,7 +345,7 @@ gen_random_rt_6(struct rt_rule_6 *rt, int nh_sz)
 	uint32_t a, i, j, k = 0;
 
 	if (config.nb_routes_per_depth[0] != 0) {
-		memset(rt[k].addr, 0, 16);
+		memset(&rt[k].addr, 0, 16);
 		rt[k].depth = 0;
 		rt[k++].nh = rte_rand() & get_max_nh(nh_sz);
 	}
@@ -380,7 +361,7 @@ gen_random_rt_6(struct rt_rule_6 *rt, int nh_sz)
 				uint64_t rnd_val = get_rnd_rng((uint64_t)edge,
 					(uint64_t)(edge + step));
 				rnd = rte_cpu_to_be_32(rnd_val << (32 - i));
-				complete_v6_addr((uint32_t *)rt[k].addr,
+				complete_v6_addr((uint32_t *)&rt[k].addr,
 					rnd, a);
 				rt[k].depth = (a * 32) + i;
 				rt[k].nh = rte_rand() & get_max_nh(nh_sz);
@@ -390,19 +371,19 @@ gen_random_rt_6(struct rt_rule_6 *rt, int nh_sz)
 }
 
 static inline void
-set_rnd_ipv6(uint8_t *addr, uint8_t *route, int depth)
+set_rnd_ipv6(struct rte_ipv6_addr *addr, struct rte_ipv6_addr *route, int depth)
 {
 	int i;
 
 	for (i = 0; i < 16; i++)
-		addr[i] = rte_rand();
+		addr->a[i] = rte_rand();
 
 	for (i = 0; i < 16; i++) {
 		if (depth >= 8)
-			addr[i] = route[i];
+			addr->a[i] = route->a[i];
 		else if (depth > 0) {
-			addr[i] &= (uint16_t)UINT8_MAX >> depth;
-			addr[i] |= route[i] & UINT8_MAX << (8 - depth);
+			addr->a[i] &= (uint16_t)UINT8_MAX >> depth;
+			addr->a[i] |= route->a[i] & UINT8_MAX << (8 - depth);
 		} else
 			return;
 		depth -= 8;
@@ -413,7 +394,7 @@ static void
 gen_rnd_lookup_tbl(int af)
 {
 	uint32_t *tbl4 = config.lookup_tbl;
-	uint8_t *tbl6 = config.lookup_tbl;
+	struct rte_ipv6_addr *tbl6 = config.lookup_tbl;
 	struct rt_rule_4 *rt4 = (struct rt_rule_4 *)config.rt;
 	struct rt_rule_6 *rt6 = (struct rt_rule_6 *)config.rt;
 	uint32_t i, j;
@@ -432,11 +413,10 @@ gen_rnd_lookup_tbl(int af)
 		for (i = 0, j = 0; i < config.nb_lookup_ips;
 				i++, j = (j + 1) % config.nb_routes) {
 			if ((rte_rand() % 100) < config.rnd_lookup_ips_ratio) {
-				set_rnd_ipv6(&tbl6[i * 16], rt6[j].addr, 0);
+				set_rnd_ipv6(&tbl6[i], &rt6[j].addr, 0);
 				config.nb_lookup_ips_rnd++;
 			} else {
-				set_rnd_ipv6(&tbl6[i * 16], rt6[j].addr,
-					rt6[j].depth);
+				set_rnd_ipv6(&tbl6[i], &rt6[j].addr, rt6[j].depth);
 			}
 		}
 	}
@@ -522,7 +502,7 @@ parse_rt_6(FILE *f)
 			s = NULL;
 		}
 
-		ret = _inet_net_pton(AF_INET6, in[RT_PREFIX], rt[j].addr);
+		ret = _inet_net_pton(AF_INET6, in[RT_PREFIX], &rt[j].addr);
 		if (ret < 0)
 			return ret;
 
@@ -561,7 +541,7 @@ dump_lookup(int af)
 {
 	FILE *f;
 	uint32_t *tbl4 = config.lookup_tbl;
-	uint8_t *tbl6 = config.lookup_tbl;
+	struct rte_ipv6_addr *tbl6 = config.lookup_tbl;
 	uint32_t i;
 
 	f = fopen(config.lookup_ips_file_s, "w");
@@ -575,7 +555,7 @@ dump_lookup(int af)
 			fprintf(f, NIPQUAD_FMT"\n", NIPQUAD(tbl4[i]));
 	} else {
 		for (i = 0; i < config.nb_lookup_ips; i++)
-			fprintf(f, NIPQUAD6_FMT"\n", NIPQUAD6(&tbl6[i * 16]));
+			fprintf(f, RTE_IPV6_ADDR_FMT"\n", RTE_IPV6_ADDR_SPLIT(&tbl6[i]));
 	}
 	fclose(f);
 	return 0;
@@ -1023,7 +1003,7 @@ dump_rt_6(struct rt_rule_6 *rt)
 	}
 
 	for (i = 0; i < config.nb_routes; i++) {
-		fprintf(f, NIPQUAD6_FMT"/%d %"PRIu64"\n", NIPQUAD6(rt[i].addr),
+		fprintf(f, RTE_IPV6_ADDR_FMT"/%d %"PRIu64"\n", RTE_IPV6_ADDR_SPLIT(&rt[i].addr),
 			rt[i].depth, rt[i].nh);
 
 	}
@@ -1043,7 +1023,7 @@ run_v6(void)
 	int ret = 0;
 	struct rte_lpm6	*lpm = NULL;
 	struct rte_lpm6_config lpm_conf;
-	uint8_t *tbl6;
+	struct rte_ipv6_addr *tbl6;
 	uint64_t fib_nh[BURST_SZ];
 	int32_t lpm_nh[BURST_SZ];
 
@@ -1094,7 +1074,7 @@ run_v6(void)
 	for (k = config.print_fract, i = 0; k > 0; k--) {
 		start = rte_rdtsc_precise();
 		for (j = 0; j < (config.nb_routes - i) / k; j++) {
-			ret = rte_fib6_add(fib, rt[i + j].addr,
+			ret = rte_fib6_add(fib, rt[i + j].addr.a,
 				rt[i + j].depth, rt[i + j].nh);
 			if (unlikely(ret != 0)) {
 				printf("Can not add a route to FIB, err %d\n",
@@ -1120,7 +1100,7 @@ run_v6(void)
 		for (k = config.print_fract, i = 0; k > 0; k--) {
 			start = rte_rdtsc_precise();
 			for (j = 0; j < (config.nb_routes - i) / k; j++) {
-				ret = rte_lpm6_add(lpm, rt[i + j].addr,
+				ret = rte_lpm6_add(lpm, &rt[i + j].addr,
 					rt[i + j].depth, rt[i + j].nh);
 				if (ret != 0) {
 					if (rt[i + j].depth == 0)
@@ -1139,7 +1119,7 @@ run_v6(void)
 	acc = 0;
 	for (i = 0; i < config.nb_lookup_ips; i += BURST_SZ) {
 		start = rte_rdtsc_precise();
-		ret = rte_fib6_lookup_bulk(fib, (uint8_t (*)[16])(tbl6 + i*16),
+		ret = rte_fib6_lookup_bulk(fib, &tbl6[i].a,
 			fib_nh, BURST_SZ);
 		acc += rte_rdtsc_precise() - start;
 		if (ret != 0) {
@@ -1154,7 +1134,7 @@ run_v6(void)
 		for (i = 0; i < config.nb_lookup_ips; i += BURST_SZ) {
 			start = rte_rdtsc_precise();
 			ret = rte_lpm6_lookup_bulk_func(lpm,
-				(uint8_t (*)[16])(tbl6 + i*16),
+				&tbl6[i],
 				lpm_nh, BURST_SZ);
 			acc += rte_rdtsc_precise() - start;
 			if (ret != 0) {
@@ -1166,10 +1146,10 @@ run_v6(void)
 
 		for (i = 0; i < config.nb_lookup_ips; i += BURST_SZ) {
 			rte_fib6_lookup_bulk(fib,
-				(uint8_t (*)[16])(tbl6 + i*16),
+				&tbl6[i].a,
 				fib_nh, BURST_SZ);
 			rte_lpm6_lookup_bulk_func(lpm,
-				(uint8_t (*)[16])(tbl6 + i*16),
+				&tbl6[i],
 				lpm_nh, BURST_SZ);
 			for (j = 0; j < BURST_SZ; j++) {
 				if ((fib_nh[j] != (uint32_t)lpm_nh[j]) &&
@@ -1186,7 +1166,7 @@ run_v6(void)
 	for (k = config.print_fract, i = 0; k > 0; k--) {
 		start = rte_rdtsc_precise();
 		for (j = 0; j < (config.nb_routes - i) / k; j++)
-			rte_fib6_delete(fib, rt[i + j].addr, rt[i + j].depth);
+			rte_fib6_delete(fib, rt[i + j].addr.a, rt[i + j].depth);
 
 		printf("AVG FIB delete %"PRIu64"\n",
 			(rte_rdtsc_precise() - start) / j);
@@ -1197,7 +1177,7 @@ run_v6(void)
 		for (k = config.print_fract, i = 0; k > 0; k--) {
 			start = rte_rdtsc_precise();
 			for (j = 0; j < (config.nb_routes - i) / k; j++)
-				rte_lpm6_delete(lpm, rt[i + j].addr,
+				rte_lpm6_delete(lpm, &rt[i + j].addr,
 					rt[i + j].depth);
 
 			printf("AVG LPM delete %"PRIu64"\n",
diff --git a/app/test-pipeline/pipeline_lpm_ipv6.c b/app/test-pipeline/pipeline_lpm_ipv6.c
index 207ffbeff00f..6558e887c859 100644
--- a/app/test-pipeline/pipeline_lpm_ipv6.c
+++ b/app/test-pipeline/pipeline_lpm_ipv6.c
@@ -127,16 +127,11 @@ app_main_loop_worker_pipeline_lpm_ipv6(void) {
 
 		ip = rte_bswap32(i << (24 -
 			rte_popcount32(app.n_ports - 1)));
-		memcpy(key.ip, &ip, sizeof(uint32_t));
+		memcpy(&key.ip, &ip, sizeof(uint32_t));
 
 		printf("Adding rule to IPv6 LPM table (IPv6 destination = "
-			"%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
-			"%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u => "
-			"port out = %u)\n",
-			key.ip[0], key.ip[1], key.ip[2], key.ip[3],
-			key.ip[4], key.ip[5], key.ip[6], key.ip[7],
-			key.ip[8], key.ip[9], key.ip[10], key.ip[11],
-			key.ip[12], key.ip[13], key.ip[14], key.ip[15],
+			RTE_IPV6_ADDR_FMT "/%u => port out = %u)\n",
+			RTE_IPV6_ADDR_SPLIT(&key.ip),
 			key.depth, i);
 
 		status = rte_pipeline_table_entry_add(p, table_id, &key, &entry,
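
In the pipeline hunk above, the sixteen "%.2x" conversions collapse into a
single RTE_IPV6_ADDR_FMT/RTE_IPV6_ADDR_SPLIT pair, and copying the 32-bit
prefix into key.ip gains an explicit '&' now that the field is a structure
rather than an array. A short sketch of that prefix write, with a made-up
prefix value, assuming <rte_byteorder.h> and <rte_ip6.h>:

    #include <string.h>

    #include <rte_byteorder.h>
    #include <rte_ip6.h>

    static struct rte_ipv6_addr
    make_prefix_key(void)
    {
            struct rte_ipv6_addr key = RTE_IPV6_ADDR_UNSPEC;
            /* hypothetical prefix 2001:db8::/32 in network byte order */
            uint32_t prefix = rte_cpu_to_be_32(0x20010db8);

            /* fill the first four bytes, leave the remaining twelve zero */
            memcpy(&key, &prefix, sizeof(prefix));
            return key;
    }
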
diff --git a/app/test/test_fib6_perf.c b/app/test/test_fib6_perf.c
index fe713e7094e5..f03cd084aa64 100644
--- a/app/test/test_fib6_perf.c
+++ b/app/test/test_fib6_perf.c
@@ -101,7 +101,7 @@ test_fib6_perf(void)
 
 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
 		next_hop_add = (i & ((1 << 14) - 1)) + 1;
-		if (rte_fib6_add(fib, large_route_table[i].ip,
+		if (rte_fib6_add(fib, large_route_table[i].ip.a,
 				large_route_table[i].depth, next_hop_add) == 0)
 			status++;
 	}
@@ -117,7 +117,7 @@ test_fib6_perf(void)
 	count = 0;
 
 	for (i = 0; i < NUM_IPS_ENTRIES; i++)
-		memcpy(ip_batch[i], large_ips_table[i].ip, 16);
+		memcpy(ip_batch[i], &large_ips_table[i].ip, 16);
 
 	for (i = 0; i < ITERATIONS; i++) {
 
@@ -140,7 +140,7 @@ test_fib6_perf(void)
 
 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
 		/* rte_fib_delete(fib, ip, depth) */
-		status += rte_fib6_delete(fib, large_route_table[i].ip,
+		status += rte_fib6_delete(fib, large_route_table[i].ip.a,
 				large_route_table[i].depth);
 	}
 
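The perf test shows the same split: rte_fib6_add() and rte_fib6_delete()
still expect uint8_t[16], so route entries hand over their .a member, while
the lookup batch stays a raw uint8_t[][16] filled by memcpy() from the
structures. Since the structure holds nothing but those 16 bytes,
sizeof(*ip) could stand in for the literal 16; an illustrative fragment
with hypothetical names, assuming <rte_ip6.h>:

    #include <string.h>

    #include <rte_ip6.h>

    /* copy one structured address into a row of a raw batch array */
    static void
    fill_row(uint8_t row[16], const struct rte_ipv6_addr *ip)
    {
            memcpy(row, ip, sizeof(*ip)); /* the struct is exactly 16 bytes */
    }
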
diff --git a/app/test/test_lpm6.c b/app/test/test_lpm6.c
index 1d8a0afa1155..b930fa3f0c17 100644
--- a/app/test/test_lpm6.c
+++ b/app/test/test_lpm6.c
@@ -92,30 +92,6 @@ rte_lpm6_test tests6[] = {
 #define MAX_NUM_TBL8S                                          (1 << 21)
 #define PASS 0
 
-static void
-IPv6(uint8_t *ip, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5,
-		uint8_t b6, uint8_t b7, uint8_t b8, uint8_t b9, uint8_t b10,
-		uint8_t b11, uint8_t b12, uint8_t b13, uint8_t b14, uint8_t b15,
-		uint8_t b16)
-{
-	ip[0] = b1;
-	ip[1] = b2;
-	ip[2] = b3;
-	ip[3] = b4;
-	ip[4] = b5;
-	ip[5] = b6;
-	ip[6] = b7;
-	ip[7] = b8;
-	ip[8] = b9;
-	ip[9] = b10;
-	ip[10] = b11;
-	ip[11] = b12;
-	ip[12] = b13;
-	ip[13] = b14;
-	ip[14] = b15;
-	ip[15] = b16;
-}
-
 /*
  * Check that rte_lpm6_create fails gracefully for incorrect user input
  * arguments
@@ -250,7 +226,7 @@ test4(void)
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
 
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth = 24, next_hop = 100;
 	int32_t status = 0;
 
@@ -259,7 +235,7 @@ test4(void)
 	config.flags = 0;
 
 	/* rte_lpm6_add: lpm == NULL */
-	status = rte_lpm6_add(NULL, ip, depth, next_hop);
+	status = rte_lpm6_add(NULL, &ip, depth, next_hop);
 	TEST_LPM_ASSERT(status < 0);
 
 	/*Create valid lpm to use in rest of test. */
@@ -267,11 +243,11 @@ test4(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	/* rte_lpm6_add: depth < 1 */
-	status = rte_lpm6_add(lpm, ip, 0, next_hop);
+	status = rte_lpm6_add(lpm, &ip, 0, next_hop);
 	TEST_LPM_ASSERT(status < 0);
 
 	/* rte_lpm6_add: depth > MAX_DEPTH */
-	status = rte_lpm6_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
+	status = rte_lpm6_add(lpm, &ip, (MAX_DEPTH + 1), next_hop);
 	TEST_LPM_ASSERT(status < 0);
 
 	rte_lpm6_free(lpm);
@@ -288,7 +264,7 @@ test5(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth = 24;
 	int32_t status = 0;
 
@@ -297,7 +273,7 @@ test5(void)
 	config.flags = 0;
 
 	/* rte_lpm_delete: lpm == NULL */
-	status = rte_lpm6_delete(NULL, ip, depth);
+	status = rte_lpm6_delete(NULL, &ip, depth);
 	TEST_LPM_ASSERT(status < 0);
 
 	/*Create valid lpm to use in rest of test. */
@@ -305,11 +281,11 @@ test5(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	/* rte_lpm_delete: depth < 1 */
-	status = rte_lpm6_delete(lpm, ip, 0);
+	status = rte_lpm6_delete(lpm, &ip, 0);
 	TEST_LPM_ASSERT(status < 0);
 
 	/* rte_lpm_delete: depth > MAX_DEPTH */
-	status = rte_lpm6_delete(lpm, ip, (MAX_DEPTH + 1));
+	status = rte_lpm6_delete(lpm, &ip, (MAX_DEPTH + 1));
 	TEST_LPM_ASSERT(status < 0);
 
 	rte_lpm6_free(lpm);
@@ -326,7 +302,7 @@ test6(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint32_t next_hop_return = 0;
 	int32_t status = 0;
 
@@ -335,7 +311,7 @@ test6(void)
 	config.flags = 0;
 
 	/* rte_lpm6_lookup: lpm == NULL */
-	status = rte_lpm6_lookup(NULL, ip, &next_hop_return);
+	status = rte_lpm6_lookup(NULL, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status < 0);
 
 	/*Create valid lpm to use in rest of test. */
@@ -347,7 +323,7 @@ test6(void)
 	TEST_LPM_ASSERT(status < 0);
 
 	/* rte_lpm6_lookup: next_hop = NULL */
-	status = rte_lpm6_lookup(lpm, ip, NULL);
+	status = rte_lpm6_lookup(lpm, &ip, NULL);
 	TEST_LPM_ASSERT(status < 0);
 
 	rte_lpm6_free(lpm);
@@ -364,7 +340,7 @@ test7(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[10][16];
+	struct rte_ipv6_addr ips[10];
 	int32_t next_hop_return[10];
 	int32_t status = 0;
 
@@ -373,7 +349,7 @@ test7(void)
 	config.flags = 0;
 
 	/* rte_lpm6_lookup: lpm == NULL */
-	status = rte_lpm6_lookup_bulk_func(NULL, ip, next_hop_return, 10);
+	status = rte_lpm6_lookup_bulk_func(NULL, ips, next_hop_return, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	/*Create valid lpm to use in rest of test. */
@@ -381,11 +357,11 @@ test7(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	/* rte_lpm6_lookup: ip = NULL */
-	status = rte_lpm6_lookup_bulk_func(lpm, NULL, next_hop_return, 10);
+	status = rte_lpm6_lookup_bulk_func(lpm, NULL, next_hop_return, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	/* rte_lpm6_lookup: next_hop = NULL */
-	status = rte_lpm6_lookup_bulk_func(lpm, ip, NULL, 10);
+	status = rte_lpm6_lookup_bulk_func(lpm, ips, NULL, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	rte_lpm6_free(lpm);
@@ -402,7 +378,7 @@ test8(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[10][16];
+	struct rte_ipv6_addr ips[10];
 	uint8_t depth[10];
 	int32_t status = 0;
 
@@ -411,7 +387,7 @@ test8(void)
 	config.flags = 0;
 
 	/* rte_lpm6_delete: lpm == NULL */
-	status = rte_lpm6_delete_bulk_func(NULL, ip, depth, 10);
+	status = rte_lpm6_delete_bulk_func(NULL, ips, depth, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	/*Create valid lpm to use in rest of test. */
@@ -419,11 +395,11 @@ test8(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	/* rte_lpm6_delete: ip = NULL */
-	status = rte_lpm6_delete_bulk_func(lpm, NULL, depth, 10);
+	status = rte_lpm6_delete_bulk_func(lpm, NULL, depth, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	/* rte_lpm6_delete: next_hop = NULL */
-	status = rte_lpm6_delete_bulk_func(lpm, ip, NULL, 10);
+	status = rte_lpm6_delete_bulk_func(lpm, ips, NULL, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	rte_lpm6_free(lpm);
@@ -441,7 +417,7 @@ test9(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth = 16;
 	uint32_t next_hop_add = 100, next_hop_return = 0;
 	int32_t status = 0;
@@ -454,21 +430,21 @@ test9(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	for (i = 0; i < UINT8_MAX; i++) {
-		ip[2] = i;
-		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		ip.a[2] = i;
+		status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 	}
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	for (i = 0; i < UINT8_MAX; i++) {
-		ip[2] = i;
-		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		ip.a[2] = i;
+		status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
 	}
 
@@ -486,7 +462,7 @@ test10(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth;
 	uint32_t next_hop_add = 100;
 	int32_t status = 0;
@@ -501,20 +477,20 @@ test10(void)
 
 	for (i = 1; i < 128; i++) {
 		depth = (uint8_t)i;
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 	}
 
 	depth = 128;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == -ENOSPC);
 
 	depth = 127;
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 128;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	rte_lpm6_free(lpm);
@@ -531,7 +507,7 @@ test11(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth;
 	uint32_t next_hop_add = 100;
 	int32_t status = 0;
@@ -544,37 +520,37 @@ test11(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	depth = 128;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	ip[0] = 1;
+	ip.a[0] = 1;
 	depth = 25;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 33;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 41;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 49;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == -ENOSPC);
 
 	depth = 41;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	rte_lpm6_free(lpm);
@@ -592,7 +568,7 @@ test12(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth;
 	uint32_t next_hop_add = 100;
 	int32_t status = 0;
@@ -605,16 +581,16 @@ test12(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	depth = 128;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	ip[0] = 1;
+	ip.a[0] = 1;
 	depth = 41;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 49;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == -ENOSPC);
 
 	rte_lpm6_free(lpm);
@@ -631,7 +607,7 @@ test13(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth;
 	uint32_t next_hop_add = 100;
 	int32_t status = 0;
@@ -644,23 +620,23 @@ test13(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	depth = 1;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 2;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 3;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == -ENOSPC);
 
 	depth = 2;
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 3;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	rte_lpm6_free(lpm);
@@ -679,7 +655,7 @@ test14(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth = 25;
 	uint32_t next_hop_add = 100;
 	int32_t status = 0;
@@ -693,24 +669,24 @@ test14(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	for (i = 0; i < 256; i++) {
-		ip[0] = (uint8_t)i;
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		ip.a[0] = (uint8_t)i;
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 	}
 
-	ip[0] = 255;
-	ip[1] = 1;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	ip.a[0] = 255;
+	ip.a[1] = 1;
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == -ENOSPC);
 
-	ip[0] = 255;
-	ip[1] = 0;
-	status = rte_lpm6_delete(lpm, ip, depth);
+	ip.a[0] = 255;
+	ip.a[1] = 0;
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	ip[0] = 255;
-	ip[1] = 1;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	ip.a[0] = 255;
+	ip.a[1] = 1;
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	rte_lpm6_free(lpm);
@@ -726,7 +702,7 @@ test15(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth = 24;
 	uint32_t next_hop_add = 100, next_hop_return = 0;
 	int32_t status = 0;
@@ -738,16 +714,16 @@ test15(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_free(lpm);
@@ -763,7 +739,7 @@ test16(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {12,12,1,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6(0x0c0c, 0x0100, 0, 0, 0, 0, 0, 0);
 	uint8_t depth = 128;
 	uint32_t next_hop_add = 100, next_hop_return = 0;
 	int32_t status = 0;
@@ -775,16 +751,16 @@ test16(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_free(lpm);
@@ -806,9 +782,9 @@ test17(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip1[] = {127,255,255,255,255,255,255,255,255,
-			255,255,255,255,255,255,255};
-	uint8_t ip2[] = {128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip1 =
+		RTE_IPV6(0x7fff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff);
+	struct rte_ipv6_addr ip2 = RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	uint8_t depth;
 	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
@@ -825,14 +801,14 @@ test17(void)
 		/* Let the next_hop_add value = depth. Just for change. */
 		next_hop_add = depth;
 
-		status = rte_lpm6_add(lpm, ip2, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip2, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 
 		/* Check IP in first half of tbl24 which should be empty. */
-		status = rte_lpm6_lookup(lpm, ip1, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip1, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
 
-		status = rte_lpm6_lookup(lpm, ip2, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip2, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) &&
 			(next_hop_return == next_hop_add));
 	}
@@ -841,10 +817,10 @@ test17(void)
 	for (depth = 16; depth >= 1; depth--) {
 		next_hop_add = (depth - 1);
 
-		status = rte_lpm6_delete(lpm, ip2, depth);
+		status = rte_lpm6_delete(lpm, &ip2, depth);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm6_lookup(lpm, ip2, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip2, &next_hop_return);
 
 		if (depth != 1) {
 			TEST_LPM_ASSERT((status == 0) &&
@@ -854,7 +830,7 @@ test17(void)
 			TEST_LPM_ASSERT(status == -ENOENT);
 		}
 
-		status = rte_lpm6_lookup(lpm, ip1, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip1, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
 	}
 
@@ -874,7 +850,7 @@ test18(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[16], ip_1[16], ip_2[16];
+	struct rte_ipv6_addr ip, ip_1, ip_2;
 	uint8_t depth, depth_1, depth_2;
 	uint32_t next_hop_add, next_hop_add_1,
 			next_hop_add_2, next_hop_return;
@@ -885,58 +861,58 @@ test18(void)
 	config.flags = 0;
 
 	/* Add & lookup to hit invalid TBL24 entry */
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 	next_hop_add = 100;
 
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
 
 	/* Add & lookup to hit valid TBL24 entry not extended */
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 23;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	depth = 24;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	depth = 24;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 23;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
@@ -944,37 +920,37 @@ test18(void)
 	/* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
 	 * entry.
 	 */
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0x0005, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 100;
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
@@ -982,38 +958,38 @@ test18(void)
 	/* Add & lookup to hit valid extended TBL24 entry with valid TBL8
 	 * entry
 	 */
-	IPv6(ip_1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_1 = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth_1 = 25;
 	next_hop_add_1 = 101;
 
-	IPv6(ip_2, 128, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_2 = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0x0005, 0, 0, 0, 0, 0, 0);
 	depth_2 = 32;
 	next_hop_add_2 = 102;
 
 	next_hop_return = 0;
 
-	status = rte_lpm6_add(lpm, ip_1, depth_1, next_hop_add_1);
+	status = rte_lpm6_add(lpm, &ip_1, depth_1, next_hop_add_1);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_1, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_1, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
 
-	status = rte_lpm6_add(lpm, ip_2, depth_2, next_hop_add_2);
+	status = rte_lpm6_add(lpm, &ip_2, depth_2, next_hop_add_2);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_2, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_2, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
 
-	status = rte_lpm6_delete(lpm, ip_2, depth_2);
+	status = rte_lpm6_delete(lpm, &ip_2, depth_2);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_2, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_2, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
 
-	status = rte_lpm6_delete(lpm, ip_1, depth_1);
+	status = rte_lpm6_delete(lpm, &ip_1, depth_1);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_1, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_1, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_free(lpm);
@@ -1037,7 +1013,7 @@ test19(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
@@ -1052,35 +1028,35 @@ test19(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 16;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 25;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	rte_lpm6_delete_all(lpm);
@@ -1090,45 +1066,45 @@ test19(void)
 	 * (& delete & lookup)
 	 */
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip, 128, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0x000a, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	next_hop_add = 100;
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
-	IPv6(ip, 128, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0x000a, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
@@ -1138,28 +1114,28 @@ test19(void)
 	 * (& delete & lookup)
 	 */
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
@@ -1169,56 +1145,56 @@ test19(void)
 	 * (& delete & lookup)
 	 */
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
 
 	/* Delete a rule that is not present in the TBL24 & lookup */
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 	next_hop_add = 100;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status < 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
 
 	/* Delete a rule that is not present in the TBL8 & lookup */
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 100;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status < 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_free(lpm);
@@ -1236,7 +1212,7 @@ test20(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
@@ -1248,45 +1224,45 @@ test20(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0x000a);
 	depth = 128;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	next_hop_add = 100;
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0x000a);
 	depth = 128;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_free(lpm);
@@ -1304,7 +1280,7 @@ test21(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip_batch[4][16];
+	struct rte_ipv6_addr ip_batch[4];
 	uint8_t depth;
 	uint32_t next_hop_add;
 	int32_t next_hop_return[4];
@@ -1317,28 +1293,28 @@ test21(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	IPv6(ip_batch[0], 128, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[0] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0001, 0, 0, 0, 0, 0);
 	depth = 48;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip_batch[0], depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[0], depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[1], 128, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[1] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0002, 0, 0, 0, 0, 0);
 	depth = 48;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip_batch[1], depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[1], depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[2], 128, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[2] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0003, 0, 0, 0, 0, 0);
 	depth = 48;
 	next_hop_add = 102;
 
-	status = rte_lpm6_add(lpm, ip_batch[2], depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[2], depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[3], 128, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[3] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0004, 0, 0, 0, 0, 0);
 
 	status = rte_lpm6_lookup_bulk_func(lpm, ip_batch,
 			next_hop_return, 4);
@@ -1363,7 +1339,7 @@ test22(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip_batch[5][16];
+	struct rte_ipv6_addr ip_batch[5];
 	uint8_t depth[5];
 	uint32_t next_hop_add;
 	int32_t next_hop_return[5];
@@ -1378,39 +1354,39 @@ test22(void)
 
 	/* Adds 5 rules and look them up */
 
-	IPv6(ip_batch[0], 128, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[0] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0001, 0, 0, 0, 0, 0);
 	depth[0] = 48;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip_batch[0], depth[0], next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[0], depth[0], next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[1], 128, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[1] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0002, 0, 0, 0, 0, 0);
 	depth[1] = 48;
 	next_hop_add = 102;
 
-	status = rte_lpm6_add(lpm, ip_batch[1], depth[1], next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[1], depth[1], next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[2], 128, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[2] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0003, 0, 0, 0, 0, 0);
 	depth[2] = 48;
 	next_hop_add = 103;
 
-	status = rte_lpm6_add(lpm, ip_batch[2], depth[2], next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[2], depth[2], next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[3], 128, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[3] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0004, 0, 0, 0, 0, 0);
 	depth[3] = 48;
 	next_hop_add = 104;
 
-	status = rte_lpm6_add(lpm, ip_batch[3], depth[3], next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[3], depth[3], next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[4], 128, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[4] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0005, 0, 0, 0, 0, 0);
 	depth[4] = 48;
 	next_hop_add = 105;
 
-	status = rte_lpm6_add(lpm, ip_batch[4], depth[4], next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[4], depth[4], next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	status = rte_lpm6_lookup_bulk_func(lpm, ip_batch,
@@ -1443,11 +1419,11 @@ test22(void)
 
 	/* Use the delete_bulk function to delete two, one invalid. Lookup again */
 
-	IPv6(ip_batch[4], 128, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[4] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0006, 0, 0, 0, 0, 0);
 	status = rte_lpm6_delete_bulk_func(lpm, &ip_batch[3], depth, 2);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[4], 128, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[4] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0005, 0, 0, 0, 0, 0);
 	status = rte_lpm6_lookup_bulk_func(lpm, ip_batch,
 			next_hop_return, 5);
 	TEST_LPM_ASSERT(status == 0 && next_hop_return[0] == -1
@@ -1481,7 +1457,7 @@ test23(void)
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
 	uint32_t i;
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
@@ -1493,22 +1469,22 @@ test23(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 128;
 	next_hop_add = 100;
 
 	for (i = 0; i < 30; i++) {
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) &&
 				(next_hop_return == next_hop_add));
 
-		status = rte_lpm6_delete(lpm, ip, depth);
+		status = rte_lpm6_delete(lpm, &ip, depth);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
 	}
 
@@ -1565,7 +1541,7 @@ test25(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint32_t i;
 	uint8_t depth;
 	uint32_t next_hop_add, next_hop_return, next_hop_expected;
@@ -1579,10 +1555,10 @@ test25(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	for (i = 0; i < 1000; i++) {
-		memcpy(ip, large_route_table[i].ip, 16);
+		ip = large_route_table[i].ip;
 		depth = large_route_table[i].depth;
 		next_hop_add = large_route_table[i].next_hop;
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 	}
 
@@ -1590,10 +1566,10 @@ test25(void)
 	generate_large_ips_table(1);
 
 	for (i = 0; i < 100000; i++) {
-		memcpy(ip, large_ips_table[i].ip, 16);
+		ip = large_ips_table[i].ip;
 		next_hop_expected = large_ips_table[i].next_hop;
 
-		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) &&
 				(next_hop_return == next_hop_expected));
 	}
@@ -1615,9 +1591,9 @@ test26(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip_10_32[] = {10, 10, 10, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-	uint8_t ip_10_24[] = {10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-	uint8_t ip_20_25[] = {10, 10, 20, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	struct rte_ipv6_addr ip_10_32 = RTE_IPV6(0x0a0a, 0x0a02, 0, 0, 0, 0, 0, 0);
+	struct rte_ipv6_addr ip_10_24 = RTE_IPV6(0x0a0a, 0x0a00, 0, 0, 0, 0, 0, 0);
+	struct rte_ipv6_addr ip_20_25 = RTE_IPV6(0x0a0a, 0x1402, 0, 0, 0, 0, 0, 0);
 	uint8_t d_ip_10_32 = 32;
 	uint8_t	d_ip_10_24 = 24;
 	uint8_t	d_ip_20_25 = 25;
@@ -1634,29 +1610,26 @@ test26(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	if ((status = rte_lpm6_add(lpm, ip_10_32, d_ip_10_32,
-			next_hop_ip_10_32)) < 0)
-		return -1;
+	status = rte_lpm6_add(lpm, &ip_10_32, d_ip_10_32, next_hop_ip_10_32);
+	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_10_32, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_10_32, &next_hop_return);
 	uint32_t test_hop_10_32 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
 
-	if ((status = rte_lpm6_add(lpm, ip_10_24, d_ip_10_24,
-			next_hop_ip_10_24)) < 0)
-			return -1;
+	status = rte_lpm6_add(lpm, &ip_10_24, d_ip_10_24, next_hop_ip_10_24);
+	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_10_24, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_10_24, &next_hop_return);
 	uint32_t test_hop_10_24 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
 
-	if ((status = rte_lpm6_add(lpm, ip_20_25, d_ip_20_25,
-			next_hop_ip_20_25)) < 0)
-		return -1;
+	status = rte_lpm6_add(lpm, &ip_20_25, d_ip_20_25, next_hop_ip_20_25);
+	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_20_25, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_20_25, &next_hop_return);
 	uint32_t test_hop_20_25 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
@@ -1671,11 +1644,11 @@ test26(void)
 		return -1;
 	}
 
-	status = rte_lpm6_lookup(lpm, ip_10_32, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_10_32, &next_hop_return);
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
 
-	status = rte_lpm6_lookup(lpm, ip_10_24, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_10_24, &next_hop_return);
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
 
@@ -1695,7 +1668,8 @@ test27(void)
 {
 		struct rte_lpm6 *lpm = NULL;
 		struct rte_lpm6_config config;
-		uint8_t ip[] = {128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,0};
+		struct rte_ipv6_addr ip =
+			RTE_IPV6(0x8080, 0x8080, 0x8080, 0x8080, 0x8080, 0x8080, 0x8080, 0);
 		uint8_t depth = 128;
 		uint32_t next_hop_add = 100, next_hop_return;
 		int32_t status = 0;
@@ -1710,19 +1684,19 @@ test27(void)
 
 		depth = 128;
 		next_hop_add = 128;
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 
 		depth = 112;
 		next_hop_add = 112;
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 
 		for (i = 0; i < 256; i++) {
-			ip[14] = (uint8_t)i;
+			ip.a[14] = i;
 			for (j = 0; j < 256; j++) {
-				ip[15] = (uint8_t)j;
-				status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+				ip.a[15] = j;
+				status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 				if (i == 0 && j == 0)
 					TEST_LPM_ASSERT(status == 0 && next_hop_return == 128);
 				else
@@ -1746,7 +1720,7 @@ test28(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth = 16;
 	uint32_t next_hop_add = 0x001FFFFF, next_hop_return = 0;
 	int32_t status = 0;
@@ -1758,13 +1732,13 @@ test28(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 	rte_lpm6_free(lpm);
 
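The literal conversions in test_lpm6.c follow from RTE_IPV6() taking the
address as eight 16-bit groups, matching the usual colon-separated
notation, so each pair of old bytes packs into one argument;
RTE_IPV6_ADDR_UNSPEC covers the all-zeroes initializers. For example
(illustrative only, assuming <rte_ip6.h>):

    #include <rte_ip6.h>

    /* the old byte list {12, 12, 1, 0, 0, ...} packs two bytes per group,
     * i.e. the address c0c:100:: in colon notation */
    static const struct rte_ipv6_addr example_ip =
            RTE_IPV6(0x0c0c, 0x0100, 0, 0, 0, 0, 0, 0);

    /* the unspecified address :: */
    static const struct rte_ipv6_addr zero_ip = RTE_IPV6_ADDR_UNSPEC;
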
diff --git a/app/test/test_lpm6_data.h b/app/test/test_lpm6_data.h
index 8ddb59563ee4..2a20b9ec36f1 100644
--- a/app/test/test_lpm6_data.h
+++ b/app/test/test_lpm6_data.h
@@ -7,16 +7,17 @@
 #include <stdint.h>
 #include <stdlib.h>
 
+#include <rte_ip6.h>
 #include <rte_random.h>
 
 struct rules_tbl_entry {
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint8_t next_hop;
 };
 
 struct ips_tbl_entry {
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t next_hop;
 };
 
@@ -29,1006 +30,1006 @@ struct ips_tbl_entry {
  */
 
 static struct rules_tbl_entry large_route_table[] = {
-	{{66, 70, 154, 143, 197, 233, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 146},
-	{{107, 79, 18, 235, 142, 84, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 141},
-	{{247, 132, 113, 1, 215, 247, 183, 239, 128, 0, 0, 0, 0, 0, 0, 0}, 67, 23},
-	{{48, 19, 41, 12, 76, 101, 114, 160, 45, 103, 134, 146, 128, 0, 0, 0}, 97, 252},
-	{{5, 70, 208, 170, 19, 0, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 6},
-	{{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 137},
-	{{12, 188, 26, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 9},
-	{{1, 235, 101, 202, 26, 92, 23, 22, 179, 223, 128, 0, 0, 0, 0, 0}, 82, 9},
-	{{215, 19, 224, 102, 45, 133, 102, 249, 56, 20, 214, 219, 93, 125, 52, 0}, 120, 163},
-	{{178, 183, 109, 64, 136, 84, 11, 53, 217, 102, 0, 0, 0, 0, 0, 0}, 79, 197},
-	{{212, 39, 158, 71, 253, 98, 248, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 249},
-	{{92, 58, 159, 130, 105, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 88},
-	{{118, 140, 65, 198, 212, 93, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 104},
-	{{86, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 36},
-	{{79, 135, 242, 193, 197, 11, 200, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 239},
-	{{163, 228, 239, 80, 41, 66, 176, 176, 0, 0, 0, 0, 0, 0, 0, 0}, 67, 201},
-	{{31, 9, 231, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 94},
-	{{108, 144, 205, 39, 215, 26, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 241},
-	{{247, 217, 172, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 239},
-	{{24, 186, 73, 182, 240, 251, 125, 165, 0, 0, 0, 0, 0, 0, 0, 0}, 66, 151},
-	{{245, 112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 12, 137},
-	{{44, 94, 138, 224, 168, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 231},
-	{{184, 221, 109, 135, 225, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 11},
-	{{51, 179, 136, 184, 30, 118, 24, 16, 26, 161, 206, 101, 0, 0, 0, 0}, 96, 20},
-	{{48, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 15, 68},
-	{{143, 235, 237, 220, 89, 119, 187, 143, 209, 94, 46, 58, 120, 0, 0, 0}, 101, 64},
-	{{121, 190, 90, 177, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 152},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 6, 217},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 101},
-	{{111, 214, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 15, 58},
-	{{162, 23, 52, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 254},
-	{{76, 103, 44, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 148},
-	{{80, 85, 219, 214, 12, 4, 65, 129, 162, 148, 208, 78, 39, 69, 94, 184}, 126, 126},
-	{{80, 54, 251, 28, 152, 23, 244, 192, 151, 83, 6, 144, 223, 213, 224, 128}, 123, 76},
-	{{39, 232, 237, 103, 191, 188, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 240},
-	{{20, 231, 89, 210, 167, 173, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 33},
-	{{125, 67, 198, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 25, 47},
-	{{26, 239, 153, 5, 213, 121, 31, 114, 161, 46, 84, 15, 148, 160, 0, 0}, 109, 41},
-	{{102, 212, 159, 118, 223, 115, 134, 172, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 72},
-	{{85, 181, 241, 127, 3, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 43},
-	{{61, 199, 131, 226, 3, 230, 94, 119, 240, 0, 0, 0, 0, 0, 0, 0}, 68, 26},
-	{{0, 143, 160, 184, 162, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 139},
-	{{170, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 219},
-	{{61, 122, 24, 251, 124, 122, 202, 192, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 105},
-	{{33, 219, 226, 3, 180, 190, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 210},
-	{{51, 251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 151},
-	{{106, 185, 11, 122, 197, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 28},
-	{{192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 64},
-	{{239, 195, 77, 239, 131, 156, 2, 246, 191, 178, 204, 160, 21, 213, 30, 128}, 121, 9},
-	{{141, 207, 181, 99, 55, 245, 151, 228, 65, 50, 85, 16, 0, 0, 0, 0}, 92, 250},
-	{{110, 159, 230, 251, 224, 210, 58, 49, 0, 0, 0, 0, 0, 0, 0, 0}, 66, 200},
-	{{134, 26, 104, 32, 129, 41, 201, 50, 164, 69, 178, 156, 156, 133, 8, 218}, 127, 132},
-	{{253, 207, 116, 105, 210, 166, 186, 99, 182, 0, 0, 0, 0, 0, 0, 0}, 71, 182},
-	{{211, 73, 38, 80, 183, 168, 52, 138, 25, 214, 112, 8, 252, 0, 0, 0}, 102, 7},
-	{{200, 244, 108, 238, 164, 141, 215, 39, 233, 249, 120, 80, 112, 0, 0, 0}, 100, 146},
-	{{107, 44, 250, 202, 64, 37, 107, 105, 140, 0, 0, 0, 0, 0, 0, 0}, 70, 98},
-	{{93, 86, 56, 27, 159, 195, 126, 39, 240, 201, 48, 0, 0, 0, 0, 0}, 86, 179},
-	{{32, 202, 214, 242, 39, 141, 61, 146, 138, 96, 0, 0, 0, 0, 0, 0}, 77, 245},
-	{{167, 77, 249, 28, 210, 196, 227, 241, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 2},
-	{{241, 59, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 5},
-	{{143, 68, 146, 210, 173, 155, 251, 173, 0, 0, 0, 0, 0, 0, 0, 0}, 66, 169},
-	{{167, 180, 226, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 52},
-	{{241, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 177},
-	{{238, 9, 168, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 74},
-	{{203, 148, 16, 96, 125, 18, 86, 1, 91, 244, 251, 20, 31, 14, 75, 128}, 122, 212},
-	{{111, 227, 137, 94, 65, 21, 77, 137, 119, 130, 159, 19, 159, 45, 18, 192}, 122, 238},
-	{{59, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 18},
-	{{110, 192, 255, 120, 84, 215, 3, 130, 38, 224, 0, 0, 0, 0, 0, 0}, 75, 155},
-	{{152, 79, 219, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 97},
-	{{118, 186, 157, 248, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 8},
-	{{70, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 123},
-	{{253, 119, 114, 227, 18, 243, 81, 61, 238, 107, 190, 144, 0, 0, 0, 0}, 92, 11},
-	{{166, 112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 211},
-	{{43, 95, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 20, 116},
-	{{94, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 57},
-	{{182, 251, 195, 132, 66, 7, 208, 146, 223, 231, 211, 181, 25, 176, 0, 0}, 108, 178},
-	{{152, 166, 111, 233, 194, 17, 230, 41, 221, 253, 69, 123, 108, 0, 0, 0}, 102, 93},
-	{{106, 141, 235, 190, 82, 241, 152, 186, 195, 81, 86, 144, 0, 0, 0, 0}, 92, 3},
-	{{32, 81, 210, 153, 151, 29, 11, 62, 127, 177, 194, 254, 103, 83, 58, 128}, 121, 162},
-	{{79, 112, 224, 26, 174, 39, 98, 181, 115, 57, 209, 189, 136, 48, 0, 0}, 109, 125},
-	{{106, 197, 83, 151, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 33},
-	{{190, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 254},
-	{{156, 73, 249, 148, 55, 192, 20, 42, 142, 128, 0, 0, 0, 0, 0, 0}, 74, 66},
-	{{64, 107, 36, 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 4},
-	{{115, 148, 71, 250, 158, 174, 168, 249, 106, 110, 196, 0, 0, 0, 0, 0}, 86, 122},
-	{{18, 139, 152, 44, 38, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 59},
-	{{55, 229, 117, 106, 146, 95, 74, 220, 122, 0, 84, 202, 183, 138, 120, 0}, 117, 99},
-	{{153, 211, 3, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 41},
-	{{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 112},
-	{{49, 192, 102, 142, 216, 3, 114, 64, 165, 128, 168, 0, 0, 0, 0, 0}, 85, 255},
-	{{201, 143, 240, 240, 209, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 106},
-	{{158, 19, 164, 196, 87, 162, 33, 120, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 170},
-	{{5, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 86},
-	{{34, 170, 246, 62, 198, 85, 193, 227, 252, 68, 0, 0, 0, 0, 0, 0}, 79, 155},
-	{{21, 52, 9, 86, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 35, 65},
-	{{203, 81, 49, 171, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 39},
-	{{211, 218, 87, 244, 93, 181, 118, 41, 156, 143, 254, 0, 0, 0, 0, 0}, 90, 162},
-	{{77, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 69},
-	{{158, 219, 219, 39, 4, 219, 100, 63, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 163},
-	{{61, 50, 232, 1, 185, 252, 243, 54, 189, 240, 170, 192, 0, 0, 0, 0}, 90, 116},
-	{{241, 143, 33, 19, 247, 55, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 19},
-	{{61, 28, 61, 252, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 48},
-	{{102, 112, 194, 108, 90, 253, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 230},
-	{{74, 88, 58, 66, 172, 41, 144, 204, 195, 240, 0, 0, 0, 0, 0, 0}, 78, 155},
-	{{44, 148, 187, 58, 190, 59, 190, 187, 124, 138, 222, 131, 0, 0, 0, 0}, 96, 158},
-	{{67, 7, 216, 139, 93, 224, 20, 135, 186, 86, 209, 111, 60, 80, 0, 0}, 113, 252},
-	{{209, 26, 12, 174, 5, 101, 164, 181, 237, 63, 192, 57, 54, 120, 0, 0}, 110, 176},
-	{{4, 66, 232, 52, 239, 56, 48, 58, 192, 0, 0, 0, 0, 0, 0, 0}, 66, 211},
-	{{158, 165, 2, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 15},
-	{{85, 204, 245, 198, 68, 44, 39, 71, 32, 0, 0, 0, 0, 0, 0, 0}, 68, 95},
-	{{181, 134, 25, 87, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 169},
-	{{26, 230, 61, 36, 79, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 249},
-	{{5, 170, 198, 139, 65, 186, 188, 45, 42, 253, 165, 89, 206, 0, 0, 0}, 105, 61},
-	{{211, 245, 62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 23, 63},
-	{{117, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 43},
-	{{103, 17, 123, 102, 70, 206, 90, 92, 124, 198, 0, 0, 0, 0, 0, 0}, 81, 228},
-	{{192, 237, 88, 244, 53, 30, 61, 160, 143, 64, 0, 0, 0, 0, 0, 0}, 78, 165},
-	{{199, 82, 217, 183, 2, 179, 195, 6, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 3},
-	{{157, 230, 79, 162, 57, 125, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 211},
-	{{27, 67, 64, 235, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 210},
-	{{72, 158, 163, 106, 193, 137, 190, 7, 250, 165, 249, 73, 64, 0, 0, 0}, 99, 61},
-	{{34, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 120},
-	{{215, 141, 95, 192, 189, 62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 94},
-	{{31, 181, 56, 141, 120, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 153},
-	{{153, 73, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 221},
-	{{162, 107, 41, 189, 165, 155, 22, 139, 165, 72, 96, 0, 0, 0, 0, 0}, 87, 163},
-	{{218, 17, 204, 165, 217, 251, 107, 45, 29, 15, 192, 167, 75, 0, 0, 0}, 106, 188},
-	{{200, 124, 238, 213, 35, 228, 94, 141, 86, 187, 101, 60, 115, 52, 131, 16}, 124, 15},
-	{{74, 237, 160, 56, 141, 217, 191, 16, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 28},
-	{{163, 47, 242, 103, 173, 217, 88, 154, 38, 200, 32, 0, 0, 0, 0, 0}, 84, 240},
-	{{20, 227, 128, 28, 144, 147, 22, 13, 94, 129, 107, 88, 0, 0, 0, 0}, 93, 59},
-	{{95, 144, 229, 107, 218, 125, 204, 233, 161, 42, 180, 64, 0, 0, 0, 0}, 90, 195},
-	{{155, 220, 83, 208, 108, 16, 134, 156, 128, 0, 0, 0, 0, 0, 0, 0}, 66, 10},
-	{{179, 138, 55, 80, 190, 153, 12, 237, 22, 120, 69, 0, 0, 0, 0, 0}, 88, 206},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 137},
-	{{3, 119, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 225},
-	{{13, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 223},
-	{{117, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 15, 29},
-	{{164, 19, 195, 47, 136, 190, 156, 255, 30, 74, 143, 134, 162, 0, 0, 0}, 103, 166},
-	{{40, 235, 94, 135, 135, 230, 71, 33, 64, 233, 0, 0, 0, 0, 0, 0}, 80, 178},
-	{{222, 151, 166, 97, 129, 250, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 38},
-	{{174, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 141},
-	{{6, 189, 100, 150, 250, 13, 46, 98, 228, 139, 50, 52, 52, 196, 128, 0}, 116, 230},
-	{{75, 252, 89, 205, 37, 52, 106, 79, 188, 120, 54, 119, 160, 0, 0, 0}, 99, 124},
-	{{38, 18, 146, 6, 63, 64, 231, 10, 152, 199, 5, 143, 147, 4, 252, 0}, 118, 54},
-	{{111, 119, 169, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 162},
-	{{105, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 32},
-	{{143, 57, 57, 101, 98, 182, 74, 227, 205, 143, 253, 237, 8, 0, 0, 0}, 102, 237},
-	{{30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 7, 215},
-	{{14, 232, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 138},
-	{{14, 53, 67, 216, 229, 155, 149, 139, 31, 253, 184, 126, 133, 108, 40, 0}, 118, 73},
-	{{22, 58, 40, 143, 188, 132, 239, 14, 181, 252, 81, 192, 0, 0, 0, 0}, 90, 43},
-	{{11, 222, 185, 243, 248, 150, 79, 230, 214, 213, 3, 23, 193, 196, 0, 0}, 112, 88},
-	{{14, 226, 198, 117, 84, 93, 22, 96, 77, 241, 173, 68, 68, 204, 72, 0}, 119, 91},
-	{{15, 103, 247, 219, 150, 142, 92, 50, 144, 0, 0, 0, 0, 0, 0, 0}, 69, 140},
-	{{0, 213, 77, 244, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 37, 65},
-	{{178, 174, 174, 239, 72, 181, 36, 217, 40, 169, 12, 104, 149, 157, 125, 128}, 122, 201},
-	{{118, 53, 55, 17, 97, 227, 243, 176, 2, 0, 0, 0, 0, 0, 0, 0}, 72, 69},
-	{{21, 253, 4, 47, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 35, 170},
-	{{5, 249, 186, 133, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 192},
-	{{47, 79, 35, 66, 11, 178, 161, 28, 87, 180, 45, 128, 0, 0, 0, 0}, 89, 21},
-	{{242, 227, 20, 73, 150, 196, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 35},
-	{{121, 169, 102, 118, 157, 192, 154, 186, 126, 0, 0, 0, 0, 0, 0, 0}, 71, 235},
-	{{9, 138, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 240},
-	{{45, 173, 14, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 136},
-	{{127, 47, 51, 201, 236, 45, 142, 80, 0, 0, 0, 0, 0, 0, 0, 0}, 60, 186},
-	{{247, 233, 34, 38, 181, 207, 127, 20, 224, 118, 59, 148, 0, 0, 0, 0}, 95, 174},
-	{{126, 187, 198, 104, 245, 223, 219, 18, 31, 124, 0, 0, 0, 0, 0, 0}, 79, 153},
-	{{3, 163, 107, 228, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 35, 118},
-	{{167, 109, 2, 95, 11, 62, 45, 128, 0, 0, 0, 0, 0, 0, 0, 0}, 60, 113},
-	{{76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 6, 58},
-	{{58, 190, 204, 151, 222, 147, 47, 78, 38, 203, 9, 17, 64, 0, 0, 0}, 101, 206},
-	{{254, 220, 254, 220, 204, 79, 35, 127, 242, 63, 106, 232, 127, 180, 0, 0}, 111, 42},
-	{{77, 156, 8, 209, 181, 37, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 230},
-	{{65, 89, 137, 76, 208, 199, 166, 90, 128, 0, 0, 0, 0, 0, 0, 0}, 67, 6},
-	{{47, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 254},
-	{{172, 154, 12, 108, 77, 37, 106, 8, 234, 7, 248, 212, 112, 160, 0, 0}, 108, 214},
-	{{254, 117, 239, 244, 154, 89, 166, 241, 12, 108, 127, 153, 206, 160, 0, 0}, 107, 43},
-	{{113, 160, 206, 52, 143, 12, 9, 148, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 178},
-	{{178, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 179},
-	{{229, 177, 28, 106, 59, 75, 182, 241, 36, 79, 224, 0, 0, 0, 0, 0}, 87, 236},
-	{{156, 72, 93, 193, 50, 235, 75, 228, 88, 115, 89, 119, 128, 0, 0, 0}, 98, 184},
-	{{28, 232, 28, 249, 83, 105, 211, 7, 136, 147, 231, 64, 0, 0, 0, 0}, 91, 95},
-	{{217, 33, 23, 107, 74, 42, 135, 197, 144, 34, 40, 243, 13, 126, 36, 136}, 127, 152},
-	{{64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 113},
-	{{85, 172, 121, 126, 213, 57, 225, 54, 197, 73, 85, 251, 9, 64, 0, 0}, 108, 137},
-	{{104, 46, 25, 71, 86, 220, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 224},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 6, 61},
-	{{241, 113, 254, 106, 53, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 205},
-	{{29, 36, 12, 244, 197, 127, 240, 8, 167, 134, 154, 248, 199, 123, 143, 240}, 124, 170},
-	{{58, 29, 129, 94, 43, 139, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 117},
-	{{213, 124, 147, 196, 7, 82, 67, 70, 228, 0, 0, 0, 0, 0, 0, 0}, 70, 225},
-	{{164, 168, 161, 140, 87, 85, 250, 41, 34, 0, 0, 0, 0, 0, 0, 0}, 72, 34},
-	{{186, 142, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 5},
-	{{237, 249, 9, 70, 247, 97, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 92},
-	{{155, 92, 145, 218, 125, 226, 226, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 230},
-	{{35, 169, 62, 156, 86, 4, 125, 219, 119, 113, 191, 75, 198, 113, 0, 0}, 112, 61},
-	{{207, 63, 96, 186, 26, 68, 115, 161, 163, 59, 190, 166, 18, 78, 232, 0}, 117, 221},
-	{{86, 40, 200, 199, 247, 86, 159, 179, 191, 184, 117, 173, 211, 158, 0, 128}, 121, 105},
-	{{104, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 181},
-	{{205, 35, 123, 178, 36, 64, 62, 153, 195, 250, 0, 0, 0, 0, 0, 0}, 79, 110},
-	{{117, 40, 57, 157, 138, 160, 223, 59, 155, 145, 64, 0, 0, 0, 0, 0}, 86, 103},
-	{{74, 166, 140, 146, 74, 72, 229, 99, 167, 124, 107, 117, 217, 14, 246, 64}, 123, 218},
-	{{12, 222, 244, 183, 83, 146, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 146},
-	{{11, 98, 146, 110, 95, 96, 80, 142, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 90},
-	{{235, 5, 187, 199, 30, 170, 82, 187, 228, 159, 22, 25, 204, 112, 0, 0}, 108, 197},
-	{{35, 96, 146, 145, 155, 116, 252, 181, 29, 205, 230, 246, 30, 0, 0, 0}, 103, 158},
-	{{174, 38, 56, 244, 227, 102, 252, 237, 128, 86, 0, 0, 0, 0, 0, 0}, 81, 118},
-	{{65, 134, 37, 58, 90, 125, 60, 84, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 95},
-	{{253, 117, 135, 98, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 152},
-	{{111, 115, 188, 184, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 239},
-	{{202, 24, 89, 9, 149, 45, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 48},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 228},
-	{{244, 98, 52, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 247},
-	{{151, 167, 43, 178, 116, 194, 173, 126, 236, 98, 40, 0, 0, 0, 0, 0}, 85, 12},
-	{{60, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 129},
-	{{208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 50},
-	{{126, 11, 216, 242, 7, 45, 121, 208, 110, 135, 210, 75, 59, 182, 228, 42}, 128, 250},
-	{{217, 26, 184, 146, 3, 18, 240, 15, 135, 8, 0, 0, 0, 0, 0, 0}, 77, 249},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 230},
-	{{145, 28, 29, 184, 2, 85, 234, 135, 98, 111, 136, 32, 0, 0, 0, 0}, 92, 228},
-	{{108, 104, 255, 254, 34, 95, 72, 157, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 181},
-	{{153, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 206},
-	{{22, 250, 130, 201, 132, 248, 189, 108, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 122},
-	{{158, 165, 234, 18, 44, 61, 82, 61, 235, 0, 0, 0, 0, 0, 0, 0}, 72, 81},
-	{{236, 57, 124, 110, 124, 218, 82, 70, 142, 78, 18, 128, 0, 0, 0, 0}, 95, 175},
-	{{94, 209, 200, 201, 149, 162, 248, 134, 239, 226, 1, 237, 16, 134, 56, 0}, 118, 170},
-	{{187, 42, 31, 144, 236, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 174},
-	{{90, 214, 185, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 104},
-	{{194, 220, 211, 212, 211, 32, 196, 98, 71, 62, 153, 103, 80, 35, 128, 0}, 114, 113},
-	{{24, 255, 158, 64, 180, 148, 10, 81, 243, 247, 0, 0, 0, 0, 0, 0}, 80, 89},
-	{{231, 155, 100, 242, 112, 160, 160, 95, 98, 253, 219, 21, 239, 90, 0, 0}, 113, 151},
-	{{225, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 108},
-	{{136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 7, 224},
-	{{250, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 95},
-	{{72, 168, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 173},
-	{{185, 51, 51, 167, 18, 44, 36, 59, 35, 135, 20, 104, 0, 0, 0, 0}, 93, 176},
-	{{57, 146, 252, 60, 197, 68, 39, 162, 80, 198, 137, 50, 97, 92, 124, 0}, 119, 84},
-	{{254, 46, 242, 105, 86, 94, 96, 14, 130, 176, 0, 0, 0, 0, 0, 0}, 78, 104},
-	{{247, 202, 176, 76, 69, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 236},
-	{{50, 233, 203, 77, 42, 21, 115, 163, 166, 138, 192, 52, 178, 37, 112, 0}, 116, 153},
-	{{62, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 190},
-	{{53, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 202},
-	{{198, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 54},
-	{{189, 234, 106, 247, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 156},
-	{{110, 24, 228, 65, 216, 147, 9, 48, 60, 179, 172, 91, 115, 185, 227, 96}, 126, 245},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 6, 218},
-	{{74, 177, 89, 218, 248, 18, 176, 39, 118, 173, 201, 152, 0, 0, 0, 0}, 93, 72},
-	{{31, 13, 153, 92, 27, 122, 150, 232, 88, 95, 202, 171, 208, 158, 0, 0}, 112, 183},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 183},
-	{{63, 37, 46, 158, 139, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 241},
-	{{53, 209, 59, 13, 202, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 106},
-	{{184, 44, 149, 221, 180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 180},
-	{{222, 134, 37, 62, 223, 193, 39, 246, 15, 151, 200, 146, 0, 0, 0, 0}, 96, 142},
-	{{199, 176, 189, 37, 233, 177, 252, 216, 94, 175, 253, 119, 96, 0, 0, 0}, 100, 6},
-	{{44, 195, 201, 106, 209, 120, 122, 38, 43, 30, 142, 22, 196, 175, 100, 0}, 118, 33},
-	{{33, 166, 10, 174, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 224},
-	{{54, 1, 189, 195, 133, 49, 36, 80, 138, 200, 0, 0, 0, 0, 0, 0}, 78, 14},
-	{{241, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 149},
-	{{221, 131, 4, 247, 112, 89, 187, 119, 219, 80, 122, 156, 216, 160, 0, 0}, 108, 131},
-	{{102, 20, 46, 129, 202, 247, 129, 1, 237, 71, 103, 58, 217, 44, 4, 0}, 121, 133},
-	{{107, 156, 151, 44, 215, 98, 171, 126, 85, 32, 42, 128, 0, 0, 0, 0}, 89, 33},
-	{{54, 25, 70, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 204},
-	{{149, 211, 242, 14, 112, 219, 176, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 43},
-	{{95, 26, 143, 193, 8, 76, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 168},
-	{{63, 102, 244, 176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 180},
-	{{64, 85, 124, 226, 59, 239, 64, 130, 68, 122, 93, 74, 32, 37, 0, 0}, 112, 208},
-	{{113, 90, 253, 149, 3, 218, 34, 215, 3, 143, 192, 64, 0, 0, 0, 0}, 90, 25},
-	{{75, 231, 33, 5, 11, 94, 117, 104, 150, 60, 72, 161, 96, 38, 0, 0}, 111, 50},
-	{{52, 13, 248, 1, 251, 14, 50, 29, 212, 123, 130, 177, 101, 96, 0, 0}, 109, 110},
-	{{248, 221, 150, 132, 252, 82, 96, 2, 80, 232, 97, 239, 253, 64, 0, 0}, 109, 21},
-	{{136, 77, 164, 161, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 147},
-	{{1, 33, 66, 254, 144, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 43, 56},
-	{{181, 25, 186, 225, 109, 190, 76, 158, 118, 122, 20, 64, 125, 55, 8, 0}, 117, 144},
-	{{191, 187, 160, 140, 17, 6, 80, 120, 236, 212, 104, 144, 128, 0, 0, 0}, 100, 198},
-	{{201, 61, 150, 254, 70, 77, 214, 211, 171, 163, 245, 64, 0, 0, 0, 0}, 90, 235},
-	{{143, 226, 190, 50, 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 105},
-	{{65, 168, 226, 36, 201, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 138},
-	{{136, 40, 65, 90, 47, 16, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 122},
-	{{94, 189, 224, 200, 170, 11, 79, 172, 0, 0, 0, 0, 0, 0, 0, 0}, 65, 193},
-	{{236, 41, 169, 234, 14, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 43, 231},
-	{{1, 40, 140, 95, 81, 173, 250, 248, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 250},
-	{{83, 176, 146, 112, 89, 156, 57, 220, 125, 48, 44, 0, 0, 0, 0, 0}, 86, 24},
-	{{76, 125, 228, 249, 243, 160, 106, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 191},
-	{{10, 203, 204, 49, 212, 115, 125, 4, 239, 122, 81, 34, 1, 198, 216, 0}, 117, 111},
-	{{74, 214, 23, 44, 211, 40, 161, 61, 237, 190, 155, 59, 173, 42, 0, 0}, 111, 205},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 133},
-	{{127, 0, 130, 61, 209, 5, 232, 35, 35, 42, 114, 52, 169, 234, 191, 0}, 122, 122},
-	{{201, 107, 210, 13, 187, 62, 145, 28, 31, 189, 56, 0, 0, 0, 0, 0}, 87, 227},
-	{{147, 171, 63, 145, 47, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 53},
-	{{93, 232, 10, 97, 21, 243, 213, 135, 200, 0, 0, 0, 0, 0, 0, 0}, 72, 224},
-	{{144, 121, 41, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 199},
-	{{116, 105, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 79},
-	{{142, 149, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 19},
-	{{97, 0, 228, 158, 50, 233, 251, 249, 0, 66, 197, 226, 0, 0, 0, 0}, 96, 211},
-	{{114, 228, 199, 155, 175, 104, 26, 213, 66, 249, 120, 218, 164, 252, 212, 0}, 120, 6},
-	{{224, 166, 76, 200, 121, 60, 110, 65, 60, 95, 137, 190, 92, 218, 218, 0}, 121, 143},
-	{{139, 219, 92, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 135},
-	{{203, 237, 64, 189, 28, 13, 75, 197, 219, 243, 172, 3, 142, 32, 0, 0}, 109, 21},
-	{{237, 186, 88, 254, 124, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 220},
-	{{182, 230, 93, 162, 129, 25, 56, 196, 112, 0, 0, 0, 0, 0, 0, 0}, 68, 151},
-	{{245, 45, 69, 226, 90, 212, 254, 16, 0, 0, 0, 0, 0, 0, 0, 0}, 60, 111},
-	{{107, 229, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 20, 63},
-	{{119, 208, 177, 235, 222, 252, 219, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 112},
-	{{178, 151, 220, 162, 120, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 48},
-	{{109, 26, 95, 170, 166, 151, 137, 83, 226, 82, 5, 114, 253, 210, 18, 12}, 126, 100},
-	{{126, 27, 252, 19, 219, 129, 121, 48, 0, 0, 0, 0, 0, 0, 0, 0}, 60, 156},
-	{{211, 195, 152, 145, 154, 93, 228, 215, 135, 101, 28, 82, 0, 0, 0, 0}, 95, 120},
-	{{252, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 5},
-	{{192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 103},
-	{{64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 84},
-	{{225, 179, 43, 43, 222, 145, 205, 238, 164, 158, 147, 229, 56, 0, 0, 0}, 101, 24},
-	{{208, 127, 151, 24, 64, 113, 47, 85, 209, 79, 144, 0, 0, 0, 0, 0}, 86, 81},
-	{{178, 144, 203, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 96},
-	{{56, 227, 139, 4, 86, 87, 180, 1, 215, 167, 237, 156, 111, 64, 47, 0}, 121, 6},
-	{{80, 76, 204, 119, 172, 169, 254, 81, 104, 166, 219, 44, 173, 161, 212, 0}, 119, 40},
-	{{129, 141, 139, 34, 241, 101, 223, 144, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 143},
-	{{85, 102, 137, 98, 65, 103, 54, 142, 144, 0, 0, 0, 0, 0, 0, 0}, 68, 69},
-	{{56, 31, 159, 13, 201, 139, 161, 31, 89, 137, 4, 0, 0, 0, 0, 0}, 92, 48},
-	{{229, 221, 54, 216, 223, 27, 196, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 115},
-	{{5, 144, 176, 43, 180, 187, 20, 49, 59, 73, 108, 34, 83, 32, 192, 0}, 115, 130},
-	{{24, 217, 205, 193, 74, 123, 160, 106, 103, 74, 200, 0, 0, 0, 0, 0}, 86, 57},
-	{{247, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 97},
-	{{12, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 146},
-	{{160, 28, 201, 119, 148, 93, 251, 118, 28, 179, 123, 52, 71, 232, 48, 0}, 117, 194},
-	{{152, 126, 17, 54, 101, 56, 130, 1, 205, 41, 207, 90, 151, 123, 128, 0}, 114, 129},
-	{{77, 165, 29, 239, 95, 242, 34, 1, 11, 204, 135, 239, 128, 0, 0, 0}, 97, 159},
-	{{183, 108, 146, 118, 74, 190, 7, 141, 9, 92, 2, 2, 8, 218, 120, 0}, 117, 242},
-	{{37, 152, 29, 239, 242, 53, 56, 143, 219, 22, 14, 158, 49, 0, 0, 0}, 104, 162},
-	{{198, 53, 241, 102, 240, 244, 97, 203, 62, 128, 213, 214, 220, 0, 0, 0}, 102, 140},
-	{{144, 89, 48, 42, 249, 231, 189, 178, 232, 199, 30, 58, 63, 57, 0, 0}, 113, 77},
-	{{68, 212, 177, 123, 44, 224, 19, 172, 89, 87, 192, 0, 0, 0, 0, 0}, 82, 121},
-	{{252, 29, 179, 224, 4, 121, 205, 67, 152, 0, 0, 0, 0, 0, 0, 0}, 69, 102},
-	{{28, 110, 164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 23, 28},
-	{{24, 88, 231, 1, 4, 71, 71, 241, 252, 14, 197, 0, 0, 0, 0, 0}, 89, 154},
-	{{63, 131, 43, 76, 58, 140, 163, 74, 158, 80, 0, 0, 0, 0, 0, 0}, 76, 39},
-	{{56, 28, 147, 149, 98, 93, 216, 216, 203, 156, 0, 0, 0, 0, 0, 0}, 78, 163},
-	{{134, 169, 6, 103, 161, 244, 134, 117, 16, 0, 0, 0, 0, 0, 0, 0}, 68, 42},
-	{{143, 247, 125, 190, 106, 50, 204, 98, 250, 151, 161, 96, 0, 0, 0, 0}, 92, 207},
-	{{235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 25},
-	{{46, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 150},
-	{{171, 35, 128, 117, 74, 29, 199, 67, 109, 176, 0, 0, 0, 0, 0, 0}, 76, 103},
-	{{220, 233, 236, 112, 135, 136, 215, 43, 42, 0, 0, 0, 0, 0, 0, 0}, 71, 155},
-	{{228, 11, 144, 117, 206, 192, 118, 25, 141, 78, 4, 105, 0, 0, 0, 0}, 96, 142},
-	{{195, 67, 194, 229, 14, 53, 129, 7, 30, 208, 38, 100, 182, 59, 0, 0}, 112, 2},
-	{{25, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 59},
-	{{160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 112},
-	{{26, 203, 217, 152, 16, 187, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 48, 166},
-	{{250, 213, 14, 235, 110, 171, 174, 23, 102, 128, 0, 0, 0, 0, 0, 0}, 73, 62},
-	{{175, 230, 160, 13, 187, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 176},
-	{{92, 155, 156, 93, 191, 73, 28, 82, 187, 129, 57, 5, 16, 0, 0, 0}, 100, 6},
-	{{45, 203, 3, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 26},
-	{{120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 6},
-	{{216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 13},
-	{{135, 215, 0, 71, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 37, 41},
-	{{221, 149, 1, 40, 112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 135},
-	{{95, 143, 255, 194, 2, 157, 191, 113, 10, 229, 204, 56, 0, 0, 0, 0}, 93, 171},
-	{{202, 212, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 20},
-	{{147, 203, 238, 120, 194, 23, 25, 58, 208, 177, 169, 0, 0, 0, 0, 0}, 89, 119},
-	{{137, 170, 113, 252, 215, 194, 224, 146, 233, 87, 86, 192, 26, 46, 0, 0}, 112, 49},
-	{{224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 141},
-	{{250, 90, 241, 174, 163, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 132},
-	{{66, 190, 202, 144, 122, 86, 22, 103, 107, 164, 57, 54, 228, 128, 0, 0}, 105, 176},
-	{{76, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 12, 186},
-	{{120, 246, 1, 52, 187, 163, 78, 105, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 93},
-	{{137, 242, 136, 71, 98, 10, 53, 97, 160, 85, 132, 127, 185, 222, 0, 0}, 111, 242},
-	{{255, 133, 181, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 163},
-	{{128, 177, 92, 155, 91, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 184},
-	{{45, 120, 186, 192, 240, 199, 178, 95, 32, 0, 0, 0, 0, 0, 0, 0}, 68, 188},
-	{{151, 98, 103, 254, 90, 6, 10, 109, 14, 158, 69, 29, 140, 237, 40, 232}, 126, 193},
-	{{148, 164, 81, 85, 76, 14, 84, 64, 89, 176, 0, 0, 0, 0, 0, 0}, 78, 63},
-	{{145, 187, 165, 136, 88, 30, 107, 191, 205, 120, 119, 216, 158, 123, 64, 0}, 115, 160},
-	{{78, 120, 28, 243, 216, 180, 87, 19, 253, 16, 110, 33, 228, 24, 232, 0}, 117, 251},
-	{{74, 6, 166, 166, 183, 157, 96, 84, 151, 0, 0, 0, 0, 0, 0, 0}, 72, 228},
-	{{89, 96, 4, 221, 214, 253, 58, 49, 9, 0, 0, 0, 0, 0, 0, 0}, 72, 168},
-	{{97, 9, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 194},
-	{{213, 215, 45, 200, 170, 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 166},
-	{{5, 14, 92, 0, 28, 245, 130, 202, 32, 40, 207, 77, 166, 170, 246, 64}, 122, 210},
-	{{77, 45, 43, 71, 202, 0, 157, 146, 59, 91, 225, 0, 0, 0, 0, 0}, 89, 254},
-	{{101, 174, 94, 168, 162, 171, 71, 12, 16, 224, 0, 0, 0, 0, 0, 0}, 75, 49},
-	{{58, 17, 187, 194, 87, 73, 215, 103, 180, 12, 40, 66, 0, 0, 0, 0}, 96, 95},
-	{{160, 91, 68, 81, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 193},
-	{{94, 112, 249, 13, 167, 245, 101, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 155},
-	{{236, 194, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 15, 133},
-	{{168, 243, 103, 221, 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 10},
-	{{86, 194, 218, 188, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 31},
-	{{232, 3, 134, 67, 63, 196, 86, 14, 170, 243, 77, 134, 187, 140, 72, 18}, 127, 98},
-	{{55, 253, 19, 201, 199, 71, 229, 218, 54, 64, 12, 162, 0, 0, 0, 0}, 96, 22},
-	{{142, 34, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 20, 214},
-	{{213, 16, 208, 50, 100, 33, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 217},
-	{{117, 237, 132, 185, 184, 246, 79, 42, 103, 98, 162, 243, 128, 0, 0, 0}, 98, 102},
-	{{120, 25, 214, 222, 61, 157, 203, 102, 3, 146, 192, 0, 0, 0, 0, 0}, 83, 169},
-	{{222, 46, 254, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 152},
-	{{254, 70, 158, 171, 11, 245, 223, 97, 70, 17, 27, 192, 186, 0, 0, 0}, 103, 214},
-	{{192, 128, 228, 17, 68, 20, 44, 31, 52, 34, 212, 1, 224, 0, 0, 0}, 99, 178},
-	{{237, 229, 203, 8, 121, 176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 164},
-	{{6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 7, 15},
-	{{71, 197, 251, 122, 138, 232, 12, 241, 116, 240, 0, 0, 0, 0, 0, 0}, 76, 94},
-	{{18, 241, 135, 210, 233, 54, 121, 185, 4, 0, 0, 0, 0, 0, 0, 0}, 70, 239},
-	{{32, 50, 213, 63, 73, 217, 180, 21, 187, 128, 0, 0, 0, 0, 0, 0}, 73, 82},
-	{{203, 166, 233, 73, 92, 182, 212, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 54},
-	{{56, 162, 126, 4, 18, 195, 192, 64, 164, 156, 119, 196, 64, 0, 0, 0}, 98, 47},
-	{{120, 87, 81, 136, 180, 179, 68, 148, 243, 38, 80, 0, 0, 0, 0, 0}, 84, 214},
-	{{64, 244, 193, 50, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 37, 215},
-	{{91, 168, 253, 158, 131, 83, 159, 163, 113, 169, 112, 0, 0, 0, 0, 0}, 84, 153},
-	{{159, 103, 102, 132, 111, 46, 18, 77, 36, 15, 137, 33, 177, 31, 243, 192}, 122, 245},
-	{{123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 118},
-	{{67, 81, 226, 190, 7, 79, 71, 250, 155, 245, 44, 81, 215, 213, 171, 224}, 123, 128},
-	{{103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 7},
-	{{246, 44, 168, 200, 198, 238, 52, 196, 125, 115, 0, 0, 0, 0, 0, 0}, 80, 152},
-	{{205, 14, 186, 252, 239, 213, 59, 119, 105, 37, 140, 209, 4, 231, 0, 0}, 114, 248},
-	{{70, 91, 254, 106, 94, 71, 170, 19, 158, 242, 192, 0, 0, 0, 0, 0}, 85, 143},
-	{{250, 86, 233, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 159},
-	{{122, 222, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 11},
-	{{27, 224, 235, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 110},
-	{{239, 100, 224, 3, 46, 127, 150, 251, 204, 120, 228, 64, 0, 0, 0, 0}, 97, 181},
-	{{144, 115, 182, 206, 146, 13, 21, 111, 37, 70, 179, 129, 173, 82, 93, 128}, 121, 4},
-	{{73, 190, 57, 243, 49, 51, 15, 209, 0, 0, 0, 0, 0, 0, 0, 0}, 67, 101},
-	{{18, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 38},
-	{{23, 37, 236, 177, 186, 7, 209, 135, 114, 44, 0, 0, 0, 0, 0, 0}, 78, 57},
-	{{200, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 142},
-	{{181, 255, 153, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 184},
-	{{135, 168, 6, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 91},
-	{{200, 224, 33, 245, 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 224},
-	{{70, 111, 10, 62, 200, 224, 38, 204, 14, 164, 0, 0, 0, 0, 0, 0}, 78, 114},
-	{{158, 133, 252, 18, 242, 12, 16, 60, 5, 52, 251, 179, 38, 235, 12, 0}, 118, 184},
-	{{2, 23, 116, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 215},
-	{{33, 25, 170, 74, 215, 134, 151, 181, 175, 232, 20, 155, 189, 242, 13, 0}, 120, 167},
-	{{160, 186, 218, 183, 167, 84, 59, 152, 13, 137, 80, 128, 0, 0, 0, 0}, 89, 233},
-	{{32, 141, 196, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 101},
-	{{207, 24, 202, 226, 191, 136, 78, 124, 160, 0, 0, 0, 0, 0, 0, 0}, 67, 139},
-	{{210, 173, 172, 27, 197, 57, 114, 146, 169, 32, 0, 0, 0, 0, 0, 0}, 79, 32},
-	{{95, 113, 12, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 57},
-	{{129, 108, 186, 28, 19, 229, 96, 134, 199, 254, 199, 64, 0, 0, 0, 0}, 91, 151},
-	{{103, 226, 38, 123, 35, 199, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 0},
-	{{41, 117, 43, 35, 208, 115, 73, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 227},
-	{{42, 220, 61, 34, 199, 183, 42, 16, 223, 135, 0, 135, 213, 150, 100, 0}, 118, 124},
-	{{165, 227, 96, 243, 112, 171, 117, 106, 50, 37, 82, 60, 80, 0, 0, 0}, 104, 228},
-	{{158, 60, 111, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 64},
-	{{124, 108, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 25, 179},
-	{{232, 68, 132, 159, 156, 103, 95, 190, 76, 0, 0, 0, 0, 0, 0, 0}, 70, 107},
-	{{70, 77, 240, 209, 72, 63, 63, 45, 125, 79, 77, 41, 13, 0, 0, 0}, 104, 206},
-	{{146, 254, 7, 5, 68, 240, 67, 237, 112, 0, 0, 0, 0, 0, 0, 0}, 68, 95},
-	{{162, 223, 117, 27, 2, 156, 94, 170, 157, 114, 162, 50, 0, 0, 0, 0}, 96, 219},
-	{{161, 62, 191, 68, 239, 73, 100, 37, 168, 254, 139, 202, 252, 65, 74, 0}, 119, 138},
-	{{248, 122, 115, 81, 15, 158, 136, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 84},
-	{{8, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 161},
-	{{142, 96, 105, 133, 251, 57, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 25},
-	{{138, 196, 139, 131, 233, 93, 65, 242, 86, 169, 7, 72, 82, 128, 0, 0}, 107, 113},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 46},
-	{{175, 151, 75, 238, 26, 12, 100, 186, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 72},
-	{{82, 205, 211, 176, 170, 79, 57, 153, 161, 218, 32, 48, 0, 0, 0, 0}, 93, 230},
-	{{227, 123, 232, 74, 236, 202, 211, 121, 200, 8, 59, 189, 81, 219, 144, 0}, 117, 142},
-	{{205, 196, 89, 90, 103, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 134},
-	{{63, 145, 23, 127, 102, 216, 49, 36, 168, 164, 59, 133, 18, 146, 0, 0}, 112, 100},
-	{{213, 72, 154, 16, 230, 236, 218, 203, 223, 51, 31, 251, 103, 64, 0, 0}, 109, 45},
-	{{126, 148, 232, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 219},
-	{{160, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 52},
-	{{137, 38, 146, 20, 99, 188, 83, 123, 159, 159, 64, 0, 0, 0, 0, 0}, 83, 240},
-	{{123, 228, 36, 44, 242, 29, 51, 228, 140, 60, 237, 0, 0, 0, 0, 0}, 90, 13},
-	{{163, 169, 25, 89, 190, 114, 165, 158, 140, 210, 192, 0, 0, 0, 0, 0}, 84, 191},
-	{{225, 38, 70, 89, 218, 236, 60, 5, 69, 163, 248, 50, 163, 64, 0, 0}, 106, 95},
-	{{91, 94, 36, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 65},
-	{{209, 238, 110, 0, 2, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 195},
-	{{57, 17, 224, 164, 69, 95, 138, 172, 111, 55, 239, 167, 160, 0, 0, 0}, 103, 21},
-	{{112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 114},
-	{{102, 96, 223, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 92},
-	{{137, 204, 150, 75, 193, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 237},
-	{{136, 56, 252, 240, 85, 48, 248, 231, 17, 49, 47, 238, 15, 233, 159, 184}, 125, 172},
-	{{57, 31, 132, 123, 234, 255, 37, 82, 167, 204, 37, 158, 128, 0, 0, 0}, 98, 116},
-	{{55, 198, 139, 219, 161, 156, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 54},
-	{{44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 203},
-	{{53, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 74},
-	{{227, 62, 107, 236, 118, 156, 60, 34, 31, 179, 76, 221, 0, 0, 0, 0}, 96, 220},
-	{{105, 40, 240, 216, 91, 61, 19, 128, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 219},
-	{{96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 179},
-	{{118, 142, 251, 249, 128, 105, 113, 16, 0, 0, 0, 0, 0, 0, 0, 0}, 61, 194},
-	{{101, 70, 196, 238, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 187},
-	{{245, 173, 165, 177, 200, 161, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 79},
-	{{0, 198, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 87},
-	{{92, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 126},
-	{{125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 106},
-	{{56, 59, 35, 82, 101, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 96},
-	{{184, 72, 77, 251, 8, 166, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 45},
-	{{143, 74, 132, 205, 218, 247, 30, 160, 145, 199, 138, 12, 89, 220, 0, 0}, 110, 8},
-	{{30, 178, 111, 225, 73, 79, 173, 52, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 226},
-	{{224, 48, 154, 231, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 222},
-	{{123, 144, 170, 143, 85, 169, 130, 245, 214, 0, 0, 0, 0, 0, 0, 0}, 71, 218},
-	{{166, 224, 212, 100, 149, 55, 35, 210, 246, 108, 41, 245, 127, 174, 128, 0}, 116, 59},
-	{{75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 80},
-	{{197, 128, 190, 87, 47, 53, 92, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 177},
-	{{249, 10, 76, 217, 225, 20, 124, 205, 44, 159, 190, 8, 0, 0, 0, 0}, 98, 44},
-	{{180, 226, 0, 167, 137, 232, 174, 120, 113, 95, 22, 184, 0, 0, 0, 0}, 93, 206},
-	{{123, 153, 102, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 64},
-	{{5, 144, 206, 158, 239, 189, 171, 120, 69, 46, 128, 237, 0, 0, 0, 0}, 96, 236},
-	{{159, 235, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 101},
-	{{42, 194, 150, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 49},
-	{{205, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 179},
-	{{19, 65, 141, 20, 127, 77, 70, 205, 151, 115, 157, 23, 118, 128, 0, 0}, 109, 112},
-	{{96, 11, 214, 40, 245, 251, 61, 64, 128, 241, 183, 183, 0, 0, 0, 0}, 96, 31},
-	{{120, 4, 235, 112, 34, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 111},
-	{{110, 127, 207, 76, 100, 148, 130, 206, 249, 2, 104, 0, 0, 0, 0, 0}, 86, 65},
-	{{226, 190, 191, 249, 173, 96, 127, 200, 62, 20, 0, 0, 0, 0, 0, 0}, 78, 222},
-	{{89, 88, 182, 14, 78, 122, 213, 192, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 4},
-	{{167, 94, 163, 227, 28, 111, 117, 103, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 67},
-	{{57, 220, 53, 116, 243, 184, 242, 134, 16, 70, 83, 61, 161, 128, 0, 0}, 109, 197},
-	{{63, 235, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 121},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 167},
-	{{15, 159, 42, 167, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 140},
-	{{216, 252, 113, 40, 239, 46, 172, 48, 103, 250, 82, 179, 136, 64, 0, 0}, 106, 193},
-	{{158, 147, 16, 44, 124, 56, 44, 48, 138, 64, 169, 0, 0, 0, 0, 0}, 90, 47},
-	{{238, 238, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 187},
-	{{63, 159, 177, 162, 106, 212, 172, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 102},
-	{{59, 40, 252, 185, 187, 216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 237},
-	{{2, 218, 11, 68, 173, 196, 16, 223, 2, 18, 122, 215, 154, 0, 0, 0}, 103, 237},
-	{{3, 9, 206, 73, 108, 196, 183, 119, 141, 162, 10, 180, 115, 32, 0, 0}, 107, 115},
-	{{17, 227, 208, 146, 63, 201, 73, 239, 29, 79, 80, 0, 0, 0, 0, 0}, 84, 217},
-	{{115, 180, 176, 241, 52, 209, 6, 64, 189, 76, 0, 0, 0, 0, 0, 0}, 79, 21},
-	{{191, 88, 98, 245, 91, 46, 137, 254, 170, 80, 11, 55, 212, 28, 128, 0}, 113, 3},
-	{{97, 141, 171, 175, 22, 233, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 48, 62},
-	{{32, 204, 102, 191, 164, 242, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 80},
-	{{29, 133, 210, 252, 124, 66, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 184},
-	{{207, 179, 54, 144, 116, 67, 29, 64, 13, 199, 0, 0, 0, 0, 0, 0}, 80, 197},
-	{{129, 216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 63},
-	{{50, 152, 249, 143, 174, 234, 240, 48, 158, 255, 80, 105, 0, 0, 0, 0}, 99, 62},
-	{{105, 208, 95, 218, 44, 11, 87, 134, 109, 18, 138, 66, 17, 69, 128, 0}, 114, 231},
-	{{151, 79, 158, 220, 122, 101, 210, 164, 64, 0, 0, 0, 0, 0, 0, 0}, 67, 158},
-	{{236, 97, 87, 155, 254, 137, 122, 208, 168, 201, 194, 118, 224, 0, 0, 0}, 101, 118},
-	{{14, 229, 193, 248, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 237},
-	{{46, 154, 50, 80, 92, 147, 158, 86, 1, 112, 0, 0, 0, 0, 0, 0}, 79, 15},
-	{{88, 131, 21, 84, 62, 86, 7, 110, 142, 251, 242, 110, 194, 175, 247, 0}, 122, 84},
-	{{229, 216, 111, 92, 173, 32, 63, 70, 36, 84, 6, 74, 136, 166, 38, 0}, 119, 205},
-	{{121, 147, 216, 245, 37, 189, 146, 63, 145, 74, 128, 0, 0, 0, 0, 0}, 82, 220},
-	{{44, 26, 254, 11, 181, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 42},
-	{{209, 114, 97, 249, 227, 159, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 144},
-	{{184, 244, 43, 117, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 37, 74},
-	{{60, 81, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 89},
-	{{18, 40, 21, 113, 226, 91, 195, 88, 161, 19, 142, 0, 0, 0, 0, 0}, 88, 77},
-	{{57, 0, 212, 158, 56, 51, 108, 198, 59, 5, 137, 196, 0, 0, 0, 0}, 94, 2},
-	{{168, 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 75},
-	{{64, 181, 254, 103, 1, 230, 117, 199, 128, 0, 0, 0, 0, 0, 0, 0}, 65, 18},
-	{{212, 48, 214, 127, 78, 176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 246},
-	{{155, 185, 236, 163, 204, 49, 129, 120, 183, 47, 10, 243, 65, 92, 192, 0}, 114, 10},
-	{{94, 200, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 207},
-	{{19, 210, 136, 113, 73, 79, 132, 196, 224, 0, 0, 0, 0, 0, 0, 0}, 68, 41},
-	{{24, 203, 246, 242, 241, 223, 150, 237, 213, 202, 11, 128, 0, 0, 0, 0}, 89, 102},
-	{{115, 59, 171, 221, 172, 181, 170, 67, 115, 205, 44, 107, 162, 67, 56, 0}, 118, 118},
-	{{250, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 146},
-	{{203, 240, 28, 158, 182, 12, 86, 182, 142, 47, 143, 57, 239, 0, 0, 0}, 104, 122},
-	{{196, 218, 109, 52, 2, 0, 64, 153, 34, 250, 240, 185, 117, 0, 0, 0}, 107, 6},
-	{{137, 131, 191, 40, 72, 209, 74, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 18},
-	{{236, 126, 167, 37, 185, 20, 34, 207, 76, 0, 0, 0, 0, 0, 0, 0}, 70, 83},
-	{{129, 192, 245, 137, 251, 52, 75, 68, 81, 112, 146, 133, 64, 0, 0, 0}, 99, 90},
-	{{7, 31, 148, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 140},
-	{{112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 242},
-	{{167, 50, 202, 179, 74, 146, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 31},
-	{{44, 188, 186, 250, 229, 71, 28, 118, 35, 253, 245, 191, 199, 18, 0, 0}, 111, 9},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 230},
-	{{156, 163, 215, 175, 71, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 50},
-	{{67, 24, 151, 198, 242, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 34},
-	{{134, 107, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 11},
-	{{35, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 71},
-	{{46, 196, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 146},
-	{{82, 172, 8, 26, 154, 34, 125, 188, 5, 149, 159, 44, 78, 222, 236, 176}, 124, 249},
-	{{78, 157, 79, 70, 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 143},
-	{{231, 5, 210, 247, 198, 5, 157, 191, 206, 225, 149, 142, 207, 40, 0, 0}, 110, 17},
-	{{38, 254, 235, 199, 191, 60, 43, 159, 190, 243, 203, 185, 184, 218, 132, 0}, 119, 60},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 162},
-	{{95, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 12, 5},
-	{{17, 128, 244, 178, 160, 78, 83, 92, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 139},
-	{{18, 102, 62, 251, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 8},
-	{{30, 75, 108, 40, 231, 166, 233, 220, 163, 176, 252, 210, 60, 30, 128, 0}, 114, 246},
-	{{18, 3, 207, 64, 25, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 171},
-	{{52, 83, 235, 61, 164, 236, 83, 173, 143, 105, 14, 0, 0, 0, 0, 0}, 88, 206},
-	{{166, 175, 186, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 163},
-	{{221, 154, 82, 98, 41, 126, 85, 52, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 166},
-	{{94, 84, 182, 120, 204, 232, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 128},
-	{{27, 174, 227, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 59},
-	{{218, 12, 4, 156, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 179},
-	{{9, 5, 190, 195, 60, 216, 80, 150, 128, 117, 86, 128, 128, 112, 98, 208}, 124, 87},
-	{{7, 226, 104, 112, 212, 9, 172, 124, 209, 121, 170, 229, 44, 178, 128, 0}, 114, 29},
-	{{47, 71, 174, 76, 52, 83, 23, 18, 106, 48, 56, 32, 0, 0, 0, 0}, 91, 184},
-	{{51, 168, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 45},
-	{{28, 182, 167, 124, 28, 22, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 144},
-	{{34, 61, 14, 51, 253, 17, 19, 170, 49, 206, 188, 207, 247, 167, 192, 0}, 114, 119},
-	{{2, 235, 18, 14, 195, 66, 237, 30, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 113},
-	{{51, 182, 142, 133, 127, 96, 159, 132, 99, 161, 64, 0, 0, 0, 0, 0}, 82, 50},
-	{{170, 145, 230, 123, 215, 189, 73, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 207},
-	{{151, 166, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 3},
-	{{16, 141, 196, 129, 132, 207, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 13},
-	{{205, 25, 184, 191, 201, 206, 109, 224, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 42},
-	{{48, 114, 33, 103, 247, 255, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 31},
-	{{179, 156, 119, 146, 125, 21, 42, 146, 237, 213, 191, 132, 0, 0, 0, 0}, 94, 30},
-	{{179, 129, 186, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 94},
-	{{17, 179, 217, 188, 128, 212, 4, 4, 152, 0, 0, 0, 0, 0, 0, 0}, 71, 190},
-	{{132, 63, 74, 89, 209, 64, 63, 192, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 238},
-	{{16, 50, 248, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 20},
-	{{189, 96, 58, 53, 191, 235, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 84},
-	{{111, 98, 6, 65, 35, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 108},
-	{{118, 223, 83, 220, 110, 122, 23, 112, 185, 155, 73, 0, 0, 0, 0, 0}, 89, 136},
-	{{173, 191, 150, 197, 204, 35, 169, 79, 31, 214, 251, 240, 0, 0, 0, 0}, 93, 196},
-	{{26, 76, 129, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 67},
-	{{231, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 12, 104},
-	{{93, 172, 223, 252, 203, 0, 206, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 15},
-	{{53, 142, 203, 124, 104, 51, 241, 12, 161, 17, 101, 245, 120, 110, 192, 199}, 128, 237},
-	{{9, 77, 120, 197, 193, 10, 237, 174, 233, 2, 165, 11, 229, 47, 144, 0}, 116, 224},
-	{{99, 161, 189, 88, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 179},
-	{{18, 8, 76, 66, 2, 185, 206, 132, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 84},
-	{{169, 53, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 65},
-	{{136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 178},
-	{{131, 162, 144, 124, 12, 98, 242, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 154},
-	{{75, 50, 129, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 106},
-	{{212, 183, 40, 225, 152, 136, 174, 91, 0, 0, 0, 0, 0, 0, 0, 0}, 67, 125},
-	{{158, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 118},
-	{{7, 48, 132, 149, 169, 212, 198, 137, 202, 0, 0, 0, 0, 0, 0, 0}, 73, 52},
-	{{173, 195, 129, 163, 141, 249, 40, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 173},
-	{{109, 79, 75, 219, 205, 182, 22, 245, 223, 17, 146, 78, 109, 119, 128, 0}, 113, 8},
-	{{174, 195, 24, 182, 215, 198, 214, 86, 34, 128, 0, 0, 0, 0, 0, 0}, 74, 211},
-	{{22, 40, 51, 109, 70, 91, 152, 56, 0, 0, 0, 0, 0, 0, 0, 0}, 61, 253},
-	{{169, 115, 246, 126, 65, 118, 219, 192, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 47},
-	{{154, 37, 70, 124, 107, 123, 232, 241, 164, 142, 71, 226, 182, 126, 0, 0}, 112, 73},
-	{{6, 108, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 192},
-	{{216, 167, 158, 158, 222, 19, 96, 28, 40, 6, 70, 12, 147, 27, 85, 240}, 128, 55},
-	{{72, 222, 52, 69, 69, 206, 163, 106, 235, 206, 80, 128, 0, 0, 0, 0}, 94, 147},
-	{{150, 112, 106, 56, 15, 243, 154, 97, 134, 110, 160, 20, 183, 144, 234, 8}, 125, 86},
-	{{58, 186, 106, 58, 124, 171, 53, 85, 33, 100, 64, 0, 0, 0, 0, 0}, 82, 16},
-	{{7, 195, 22, 31, 62, 217, 209, 46, 90, 49, 189, 50, 168, 126, 0, 0}, 111, 167},
-	{{92, 44, 159, 198, 185, 94, 231, 177, 64, 0, 0, 0, 0, 0, 0, 0}, 67, 148},
-	{{169, 108, 190, 162, 23, 39, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 66},
-	{{161, 5, 3, 11, 158, 157, 166, 212, 246, 22, 140, 101, 92, 0, 0, 0}, 104, 70},
-	{{71, 191, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 166},
-	{{48, 136, 194, 145, 57, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 109},
-	{{144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 226},
-	{{223, 209, 10, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 8},
-	{{154, 79, 170, 9, 43, 139, 249, 176, 186, 72, 216, 0, 0, 0, 0, 0}, 85, 218},
-	{{1, 8, 123, 205, 167, 134, 128, 102, 10, 72, 0, 0, 0, 0, 0, 0}, 78, 54},
-	{{31, 105, 48, 77, 103, 187, 99, 67, 96, 0, 0, 0, 0, 0, 0, 0}, 67, 48},
-	{{14, 73, 54, 76, 232, 35, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 244},
-	{{14, 109, 251, 190, 36, 253, 99, 120, 94, 64, 0, 0, 0, 0, 0, 0}, 74, 50},
-	{{122, 170, 9, 134, 124, 91, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 173},
-	{{246, 10, 85, 88, 82, 217, 95, 56, 216, 203, 160, 0, 0, 0, 0, 0}, 84, 245},
-	{{77, 100, 114, 207, 150, 177, 69, 134, 74, 131, 147, 117, 177, 64, 210, 128}, 121, 54},
-	{{171, 123, 22, 138, 132, 229, 250, 81, 186, 227, 146, 27, 170, 205, 128, 0}, 113, 86},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 115},
-	{{12, 35, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 144},
-	{{255, 124, 179, 165, 169, 250, 66, 171, 223, 125, 247, 0, 0, 0, 0, 0}, 89, 171},
-	{{244, 235, 211, 10, 251, 255, 206, 6, 198, 12, 50, 136, 0, 0, 0, 0}, 93, 231},
-	{{221, 77, 237, 41, 50, 33, 103, 24, 25, 127, 208, 0, 0, 0, 0, 0}, 88, 34},
-	{{216, 69, 47, 53, 117, 24, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 225},
-	{{180, 87, 25, 236, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 174},
-	{{110, 32, 24, 34, 116, 133, 245, 128, 123, 95, 125, 122, 100, 129, 128, 0}, 113, 37},
-	{{27, 117, 179, 112, 133, 137, 110, 193, 246, 201, 219, 65, 56, 234, 106, 128}, 121, 39},
-	{{186, 117, 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 23, 59},
-	{{243, 119, 54, 16, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 96},
-	{{160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 147},
-	{{78, 48, 117, 200, 245, 118, 115, 240, 170, 125, 84, 103, 33, 168, 0, 0}, 110, 56},
-	{{201, 253, 184, 254, 143, 81, 95, 42, 243, 147, 96, 145, 23, 26, 0, 0}, 111, 234},
-	{{41, 215, 84, 136, 234, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 199},
-	{{91, 244, 137, 184, 231, 95, 135, 10, 184, 0, 0, 0, 0, 0, 0, 0}, 69, 191},
-	{{113, 31, 181, 245, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 235},
-	{{181, 216, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 20, 45},
-	{{87, 26, 119, 229, 97, 255, 9, 43, 32, 0, 0, 0, 0, 0, 0, 0}, 67, 164},
-	{{205, 112, 67, 163, 196, 148, 5, 105, 8, 138, 144, 3, 171, 213, 159, 128}, 121, 130},
-	{{136, 27, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 166},
-	{{2, 175, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 140},
-	{{222, 131, 85, 218, 16, 229, 44, 230, 243, 76, 250, 139, 1, 203, 108, 0}, 118, 47},
-	{{101, 180, 77, 142, 194, 73, 196, 246, 107, 100, 194, 72, 204, 124, 0, 0}, 111, 148},
-	{{96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 103},
-	{{46, 62, 191, 130, 110, 128, 235, 62, 68, 39, 58, 152, 207, 204, 96, 0}, 116, 94},
-	{{111, 11, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 85},
-	{{58, 43, 14, 93, 102, 210, 117, 208, 222, 171, 130, 41, 16, 16, 0, 0}, 109, 250},
-	{{141, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 153},
-	{{170, 153, 160, 170, 144, 235, 122, 8, 106, 34, 24, 32, 102, 57, 12, 168}, 125, 182},
-	{{34, 113, 163, 107, 61, 177, 39, 172, 242, 2, 130, 0, 0, 0, 0, 0}, 94, 23},
-	{{222, 191, 239, 110, 162, 191, 195, 181, 80, 50, 85, 240, 88, 32, 0, 0}, 108, 38},
-	{{179, 82, 253, 151, 212, 0, 72, 253, 175, 22, 34, 78, 53, 32, 0, 0}, 110, 121},
-	{{10, 162, 20, 46, 164, 64, 88, 1, 202, 204, 124, 0, 0, 0, 0, 0}, 87, 146},
-	{{210, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 138},
-	{{183, 200, 1, 2, 51, 6, 66, 142, 20, 77, 48, 244, 0, 0, 0, 0}, 94, 149},
-	{{29, 20, 224, 57, 204, 161, 131, 254, 53, 133, 163, 0, 0, 0, 0, 0}, 88, 232},
-	{{75, 58, 170, 52, 146, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 255},
-	{{92, 21, 1, 113, 185, 88, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 148},
-	{{103, 180, 222, 187, 129, 117, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 48, 117},
-	{{32, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 237},
-	{{7, 60, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 113},
-	{{167, 122, 205, 185, 21, 199, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 162},
-	{{21, 78, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 225},
-	{{92, 159, 167, 169, 136, 176, 95, 255, 87, 137, 112, 16, 0, 0, 0, 0}, 92, 210},
-	{{84, 120, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 34},
-	{{126, 5, 126, 176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 224},
-	{{4, 58, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 143},
-	{{239, 154, 181, 182, 189, 211, 244, 53, 144, 0, 0, 0, 0, 0, 0, 0}, 68, 216},
-	{{254, 188, 139, 167, 135, 47, 147, 239, 187, 106, 228, 156, 234, 234, 102, 0}, 120, 239},
-	{{225, 168, 138, 92, 193, 255, 47, 233, 11, 154, 205, 86, 209, 88, 0, 0}, 111, 54},
-	{{223, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 35},
-	{{235, 252, 115, 10, 151, 104, 193, 207, 38, 228, 229, 245, 42, 13, 108, 0}, 119, 230},
-	{{1, 137, 53, 36, 210, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 234},
-	{{149, 182, 72, 197, 92, 229, 9, 10, 220, 128, 72, 19, 4, 58, 192, 0}, 115, 70},
-	{{105, 73, 57, 108, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 246},
-	{{189, 61, 230, 24, 235, 82, 58, 102, 97, 111, 121, 252, 156, 94, 191, 166}, 127, 217},
-	{{193, 108, 231, 86, 140, 14, 192, 4, 135, 80, 129, 166, 158, 61, 230, 20}, 128, 201},
-	{{110, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 49},
-	{{3, 102, 36, 231, 15, 242, 143, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 2},
-	{{81, 189, 220, 168, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 64},
-	{{168, 75, 133, 180, 91, 165, 77, 232, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 239},
-	{{106, 179, 186, 109, 81, 234, 233, 167, 101, 160, 90, 102, 174, 234, 208, 0}, 116, 47},
-	{{46, 105, 234, 21, 23, 247, 169, 33, 47, 5, 0, 0, 0, 0, 0, 0}, 80, 43},
-	{{152, 144, 100, 142, 129, 23, 227, 50, 67, 81, 249, 116, 0, 0, 0, 0}, 94, 17},
-	{{109, 74, 145, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 5},
-	{{100, 243, 22, 230, 38, 44, 128, 86, 132, 57, 0, 0, 0, 0, 0, 0}, 81, 240},
-	{{153, 251, 115, 65, 104, 179, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 197},
-	{{43, 113, 60, 224, 36, 20, 42, 161, 24, 223, 192, 0, 0, 0, 0, 0}, 84, 192},
-	{{61, 77, 121, 176, 138, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 160},
-	{{119, 194, 146, 49, 59, 242, 25, 220, 122, 104, 80, 0, 0, 0, 0, 0}, 84, 199},
-	{{254, 162, 155, 47, 187, 3, 1, 114, 142, 191, 152, 44, 144, 26, 202, 0}, 127, 217},
-	{{176, 1, 114, 42, 191, 145, 43, 1, 141, 18, 64, 0, 0, 0, 0, 0}, 83, 75},
-	{{170, 244, 67, 132, 145, 163, 76, 213, 85, 237, 248, 22, 207, 64, 0, 0}, 106, 222},
-	{{102, 190, 58, 32, 75, 15, 89, 163, 64, 7, 168, 0, 0, 0, 0, 0}, 85, 39},
-	{{124, 170, 35, 47, 152, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 9},
-	{{192, 221, 20, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 217},
-	{{208, 178, 136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 25, 142},
-	{{188, 68, 77, 30, 68, 153, 102, 180, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 18},
-	{{114, 178, 121, 188, 205, 233, 35, 77, 34, 197, 158, 174, 101, 0, 0, 0}, 104, 180},
-	{{195, 98, 67, 12, 13, 43, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 205},
-	{{146, 190, 42, 222, 14, 54, 28, 128, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 251},
-	{{185, 202, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 178},
-	{{138, 30, 129, 95, 224, 161, 120, 128, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 198},
-	{{69, 181, 5, 227, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 84},
-	{{90, 180, 0, 164, 227, 75, 174, 119, 128, 0, 0, 0, 0, 0, 0, 0}, 66, 128},
-	{{20, 60, 58, 119, 245, 177, 162, 186, 13, 112, 211, 239, 128, 0, 0, 0}, 97, 75},
-	{{158, 124, 157, 25, 230, 139, 51, 212, 76, 109, 236, 210, 48, 0, 0, 0}, 101, 192},
-	{{125, 108, 242, 36, 94, 13, 36, 106, 90, 51, 83, 217, 131, 151, 0, 0}, 114, 60},
-	{{222, 218, 162, 158, 15, 53, 191, 178, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 169},
-	{{104, 202, 127, 109, 73, 16, 17, 12, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 10},
-	{{172, 171, 246, 26, 176, 34, 22, 152, 246, 56, 173, 120, 105, 60, 92, 0}, 118, 64},
-	{{190, 22, 171, 206, 109, 186, 179, 128, 253, 182, 108, 212, 220, 167, 171, 180}, 127, 182},
-	{{119, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 29},
-	{{160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 39},
-	{{170, 144, 64, 2, 107, 166, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 93},
-	{{234, 9, 96, 20, 156, 157, 1, 34, 88, 0, 0, 0, 0, 0, 0, 0}, 75, 228},
-	{{147, 237, 16, 120, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 236},
-	{{182, 189, 162, 158, 223, 90, 173, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 190},
-	{{116, 148, 142, 240, 10, 253, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 217},
-	{{211, 73, 140, 69, 252, 27, 75, 46, 37, 6, 147, 32, 0, 0, 0, 0}, 93, 74},
-	{{148, 61, 120, 49, 220, 65, 150, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 61, 180},
-	{{172, 35, 202, 180, 129, 75, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 91},
-	{{215, 109, 147, 157, 32, 28, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 230},
-	{{151, 26, 182, 112, 205, 220, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 175},
-	{{73, 91, 93, 61, 196, 3, 66, 26, 149, 96, 0, 0, 0, 0, 0, 0}, 75, 171},
-	{{203, 163, 52, 247, 28, 119, 56, 223, 138, 70, 174, 97, 77, 59, 46, 0}, 120, 202},
-	{{251, 50, 228, 178, 202, 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 113},
-	{{217, 159, 164, 199, 14, 237, 170, 184, 100, 231, 92, 222, 0, 0, 0, 0}, 96, 187},
-	{{16, 161, 85, 193, 202, 21, 3, 155, 63, 116, 124, 203, 34, 13, 215, 0}, 120, 38},
-	{{111, 52, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 35},
-	{{69, 12, 116, 151, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 115},
-	{{187, 60, 97, 40, 112, 101, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 18},
-	{{230, 194, 136, 255, 206, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 34},
-	{{179, 239, 170, 107, 3, 13, 212, 67, 177, 69, 8, 0, 0, 0, 0, 0}, 87, 75},
-	{{11, 58, 130, 89, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 232},
-	{{217, 178, 43, 203, 234, 20, 234, 186, 157, 88, 146, 192, 0, 0, 0, 0}, 91, 154},
-	{{6, 180, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 195},
-	{{157, 154, 218, 158, 39, 224, 103, 230, 164, 0, 0, 0, 0, 0, 0, 0}, 70, 122},
-	{{225, 10, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 97},
-	{{16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 220},
-	{{166, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 7, 80},
-	{{29, 190, 131, 215, 232, 246, 41, 226, 52, 192, 0, 0, 0, 0, 0, 0}, 77, 133},
-	{{138, 74, 163, 93, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 93},
-	{{229, 64, 97, 41, 28, 243, 249, 185, 97, 35, 49, 27, 175, 24, 0, 0}, 110, 176},
-	{{6, 73, 94, 160, 186, 216, 84, 117, 233, 169, 146, 234, 0, 0, 0, 0}, 95, 68},
-	{{163, 40, 242, 81, 224, 35, 72, 194, 176, 78, 224, 174, 12, 0, 0, 0}, 103, 247},
-	{{2, 205, 40, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 240},
-	{{174, 225, 240, 160, 212, 8, 246, 67, 36, 0, 0, 0, 0, 0, 0, 0}, 74, 83},
-	{{5, 117, 182, 141, 166, 249, 196, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 132},
-	{{46, 152, 169, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 217},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 214},
-	{{233, 202, 159, 219, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 193},
-	{{172, 54, 159, 5, 14, 245, 106, 182, 2, 0, 0, 0, 0, 0, 0, 0}, 71, 61},
-	{{241, 222, 251, 114, 200, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 65},
-	{{31, 243, 190, 4, 207, 198, 249, 59, 167, 127, 93, 64, 0, 0, 0, 0}, 91, 108},
-	{{201, 35, 222, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 244},
-	{{187, 105, 13, 114, 238, 197, 145, 23, 169, 116, 91, 28, 0, 0, 0, 0}, 95, 194},
-	{{251, 251, 121, 168, 152, 178, 147, 188, 229, 123, 154, 242, 190, 165, 173, 48}, 124, 82},
-	{{66, 187, 191, 164, 31, 196, 40, 186, 148, 115, 134, 57, 222, 254, 48, 0}, 116, 45},
-	{{209, 17, 111, 41, 154, 94, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 48, 224},
-	{{40, 245, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 17},
-	{{72, 121, 151, 83, 170, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 133},
-	{{171, 172, 101, 238, 201, 148, 23, 81, 4, 11, 64, 0, 0, 0, 0, 0}, 85, 125},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 42},
-	{{20, 46, 27, 93, 195, 184, 6, 162, 109, 225, 22, 152, 0, 0, 0, 0}, 96, 140},
-	{{243, 122, 30, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 91},
-	{{89, 250, 80, 72, 148, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 48, 92},
-	{{187, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 125},
-	{{172, 160, 143, 114, 128, 239, 174, 133, 176, 154, 159, 134, 10, 0, 0, 0}, 106, 249},
-	{{254, 202, 113, 112, 173, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 202},
-	{{80, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 107},
-	{{222, 216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 124},
-	{{219, 138, 253, 12, 188, 197, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 57},
-	{{124, 41, 173, 8, 202, 192, 61, 254, 174, 48, 239, 112, 0, 0, 0, 0}, 92, 181},
-	{{195, 236, 245, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 107},
-	{{83, 82, 42, 244, 136, 191, 197, 81, 91, 154, 216, 85, 29, 150, 198, 22}, 128, 101},
-	{{112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 102},
-	{{44, 30, 219, 248, 214, 88, 225, 132, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 136},
-	{{41, 171, 206, 178, 195, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 114},
-	{{159, 15, 180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 215},
-	{{42, 188, 37, 174, 86, 40, 4, 84, 174, 216, 0, 0, 0, 0, 0, 0}, 79, 249},
-	{{185, 227, 85, 177, 219, 95, 250, 227, 69, 154, 118, 0, 0, 0, 0, 0}, 88, 29},
-	{{22, 185, 238, 100, 25, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 71},
-	{{122, 149, 117, 77, 88, 250, 187, 203, 136, 22, 85, 42, 105, 234, 79, 8}, 127, 112},
-	{{93, 152, 229, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 72},
-	{{129, 37, 165, 167, 241, 24, 37, 40, 2, 128, 0, 0, 0, 0, 0, 0}, 73, 155},
-	{{30, 202, 177, 3, 253, 202, 164, 248, 0, 0, 0, 0, 0, 0, 0, 0}, 61, 66},
-	{{176, 25, 220, 120, 194, 228, 10, 45, 225, 142, 192, 96, 0, 0, 0, 0}, 91, 77},
-	{{96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 109},
-	{{82, 56, 12, 204, 61, 45, 147, 240, 221, 0, 0, 0, 0, 0, 0, 0}, 72, 37},
-	{{242, 38, 240, 41, 140, 75, 250, 37, 175, 115, 97, 224, 0, 0, 0, 0}, 91, 56},
-	{{251, 192, 23, 90, 135, 56, 252, 56, 79, 219, 80, 167, 22, 0, 0, 0}, 103, 5},
-	{{62, 128, 139, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 25, 15},
-	{{214, 1, 84, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 183},
-	{{207, 90, 237, 137, 171, 140, 227, 88, 250, 26, 197, 162, 163, 0, 0, 0}, 105, 171},
-	{{196, 151, 235, 232, 114, 248, 1, 207, 193, 184, 186, 71, 157, 0, 0, 0}, 112, 202},
-	{{152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 136},
-	{{9, 174, 211, 200, 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 37, 107},
-	{{89, 150, 95, 28, 209, 13, 125, 159, 254, 244, 110, 0, 0, 0, 0, 0}, 87, 193},
-	{{23, 28, 202, 10, 90, 158, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 4},
-	{{48, 25, 180, 9, 84, 236, 6, 144, 30, 198, 41, 56, 0, 0, 0, 0}, 96, 68},
-	{{252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 7, 40},
-	{{20, 165, 57, 130, 164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 255},
-	{{167, 56, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 108},
-	{{91, 204, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 219},
-	{{24, 46, 9, 4, 170, 150, 56, 130, 127, 120, 118, 104, 168, 48, 0, 0}, 108, 12},
-	{{156, 60, 245, 247, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 84},
-	{{148, 104, 187, 174, 129, 28, 127, 162, 92, 222, 52, 18, 0, 0, 0, 0}, 96, 33},
-	{{38, 253, 182, 153, 233, 194, 159, 41, 94, 193, 254, 160, 0, 0, 0, 0}, 91, 199},
-	{{156, 77, 105, 235, 145, 216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 52},
-	{{100, 211, 238, 147, 65, 222, 99, 73, 252, 113, 46, 113, 52, 136, 0, 0}, 113, 184},
-	{{13, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 124},
-	{{29, 240, 141, 230, 78, 237, 25, 135, 131, 6, 65, 77, 77, 248, 0, 0}, 109, 128},
-	{{15, 192, 109, 31, 149, 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 255},
-	{{80, 185, 170, 71, 41, 58, 158, 106, 253, 7, 2, 184, 173, 0, 0, 0}, 105, 146},
-	{{16, 229, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 172},
-	{{169, 2, 153, 9, 169, 203, 245, 154, 184, 0, 0, 0, 0, 0, 0, 0}, 70, 116},
-	{{144, 135, 239, 164, 142, 187, 64, 109, 0, 0, 0, 0, 0, 0, 0, 0}, 66, 189},
-	{{170, 78, 252, 227, 242, 199, 130, 251, 200, 0, 0, 0, 0, 0, 0, 0}, 70, 10},
-	{{232, 18, 15, 126, 166, 126, 58, 25, 209, 62, 76, 79, 0, 0, 0, 0}, 98, 184},
-	{{170, 82, 72, 53, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 98},
-	{{152, 100, 37, 122, 242, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 37},
-	{{174, 231, 230, 33, 71, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 174},
-	{{74, 225, 252, 153, 202, 8, 162, 39, 64, 0, 0, 0, 0, 0, 0, 0}, 67, 251},
-	{{167, 186, 101, 187, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 115},
-	{{83, 7, 21, 122, 243, 67, 171, 146, 145, 160, 168, 103, 223, 64, 0, 0}, 107, 252},
-	{{83, 132, 219, 86, 86, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 176},
-	{{22, 113, 72, 102, 73, 16, 236, 57, 197, 122, 31, 0, 0, 0, 0, 0}, 91, 155},
-	{{250, 59, 64, 35, 72, 112, 159, 85, 200, 5, 193, 39, 152, 185, 148, 16}, 124, 36},
-	{{220, 21, 48, 164, 224, 121, 17, 69, 10, 118, 106, 0, 0, 0, 0, 0}, 88, 202},
-	{{160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 208},
-	{{247, 64, 83, 125, 195, 225, 50, 76, 18, 104, 0, 0, 0, 0, 0, 0}, 77, 158},
-	{{78, 91, 31, 202, 189, 25, 13, 133, 220, 0, 0, 0, 0, 0, 0, 0}, 72, 136},
-	{{105, 197, 26, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 191},
-	{{14, 31, 154, 242, 241, 231, 55, 151, 223, 56, 134, 255, 113, 206, 69, 0}, 120, 126},
-	{{247, 193, 58, 176, 16, 71, 31, 120, 213, 104, 231, 83, 26, 118, 91, 135}, 128, 139},
-	{{136, 32, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 25, 216},
-	{{100, 238, 112, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 93},
-	{{80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 196},
-	{{233, 224, 254, 57, 33, 205, 140, 217, 181, 72, 0, 0, 0, 0, 0, 0}, 81, 119},
-	{{107, 75, 65, 158, 128, 142, 191, 188, 188, 240, 148, 243, 116, 0, 0, 0}, 104, 93},
-	{{39, 70, 120, 114, 69, 237, 95, 48, 233, 176, 91, 154, 0, 0, 0, 0}, 96, 183},
-	{{10, 61, 43, 101, 64, 102, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 207},
-	{{151, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 102},
-	{{210, 241, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 36},
-	{{52, 222, 249, 31, 108, 137, 199, 1, 242, 173, 184, 144, 0, 0, 0, 0}, 93, 41},
-	{{123, 111, 88, 192, 69, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 70},
-	{{180, 82, 188, 125, 140, 8, 196, 74, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 218},
-	{{77, 158, 34, 101, 196, 102, 56, 220, 42, 143, 181, 187, 240, 64, 161, 0}, 120, 226},
-	{{88, 220, 222, 38, 23, 108, 5, 148, 185, 110, 20, 14, 67, 61, 0, 0}, 114, 25},
-	{{90, 65, 220, 165, 197, 133, 110, 92, 228, 19, 2, 17, 0, 0, 0, 0}, 98, 6},
-	{{35, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 26},
-	{{103, 123, 49, 209, 228, 229, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 149},
-	{{50, 244, 58, 191, 95, 156, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 127},
-	{{140, 169, 75, 77, 78, 86, 40, 16, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 144},
-	{{99, 176, 175, 83, 114, 50, 214, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 213},
-	{{19, 208, 211, 76, 85, 176, 247, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 115},
-	{{153, 28, 188, 113, 211, 116, 7, 178, 136, 205, 96, 0, 0, 0, 0, 0}, 83, 146},
-	{{160, 180, 220, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 58},
-	{{234, 6, 112, 19, 61, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 222},
-	{{97, 110, 34, 117, 149, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 16},
-	{{99, 173, 119, 73, 250, 30, 144, 30, 128, 0, 0, 0, 0, 0, 0, 0}, 65, 169},
-	{{169, 134, 111, 89, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 175},
-	{{134, 80, 227, 43, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 3},
-	{{231, 243, 35, 80, 75, 207, 128, 137, 54, 170, 71, 238, 0, 0, 0, 0}, 96, 2},
-	{{189, 190, 121, 135, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 193},
-	{{143, 155, 216, 193, 239, 205, 204, 153, 143, 236, 69, 23, 200, 211, 0, 0}, 118, 151},
-	{{32, 1, 115, 244, 33, 219, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 182},
-	{{220, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 148},
-	{{206, 87, 135, 235, 116, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 53},
-	{{152, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 87},
-	{{58, 146, 188, 233, 230, 236, 192, 214, 168, 128, 0, 0, 0, 0, 0, 0}, 73, 235},
-	{{84, 220, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 23, 51},
-	{{106, 145, 142, 42, 186, 186, 58, 1, 48, 98, 165, 131, 48, 156, 192, 0}, 116, 11},
-	{{53, 219, 120, 242, 166, 214, 81, 130, 64, 0, 0, 0, 0, 0, 0, 0}, 68, 28},
-	{{240, 120, 76, 163, 32, 197, 181, 251, 98, 220, 29, 226, 0, 0, 0, 0}, 96, 73},
-	{{234, 197, 12, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 216},
-	{{191, 94, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 99},
-	{{200, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 35},
-	{{29, 129, 47, 83, 19, 75, 158, 1, 28, 24, 26, 147, 82, 119, 140, 100}, 127, 195},
-	{{241, 174, 26, 53, 152, 112, 200, 134, 84, 187, 177, 176, 42, 64, 0, 0}, 108, 176},
-	{{77, 171, 145, 48, 195, 84, 190, 36, 122, 199, 18, 0, 0, 0, 0, 0}, 87, 217},
-	{{105, 104, 135, 53, 226, 118, 238, 169, 9, 253, 132, 162, 217, 123, 191, 96}, 126, 244},
-	{{160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 125},
-	{{41, 85, 143, 128, 91, 137, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 219},
-	{{116, 110, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 165},
-	{{75, 213, 44, 16, 43, 157, 34, 171, 98, 117, 109, 151, 5, 60, 224, 0}, 117, 6},
-	{{229, 23, 116, 61, 80, 139, 200, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 47},
-	{{83, 123, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 23, 73},
-	{{151, 243, 45, 217, 216, 158, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 98},
-	{{171, 184, 110, 211, 237, 114, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 21},
-	{{7, 246, 199, 119, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 142},
-	{{103, 47, 70, 17, 31, 232, 44, 75, 145, 155, 100, 216, 0, 0, 0, 0}, 93, 34},
-	{{65, 170, 169, 100, 167, 147, 142, 251, 20, 64, 0, 0, 0, 0, 0, 0}, 74, 41},
-	{{235, 6, 229, 248, 151, 137, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 80},
-	{{156, 39, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 11},
-	{{92, 188, 82, 192, 142, 249, 190, 128, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 254},
-	{{253, 218, 181, 46, 134, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 95},
-	{{189, 19, 31, 244, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 8},
-	{{30, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 212},
-	{{81, 226, 13, 173, 79, 123, 223, 124, 108, 80, 83, 238, 0, 0, 0, 0}, 95, 217},
-	{{126, 211, 206, 82, 147, 215, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 15},
-	{{42, 229, 135, 197, 196, 243, 94, 181, 133, 34, 16, 0, 0, 0, 0, 0}, 84, 66},
-	{{68, 210, 158, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 122},
-	{{183, 63, 223, 94, 81, 41, 203, 20, 236, 212, 220, 199, 0, 0, 0, 0}, 97, 12},
-	{{131, 146, 2, 125, 174, 43, 231, 20, 194, 0, 0, 0, 0, 0, 0, 0}, 71, 171},
-	{{31, 180, 246, 158, 28, 192, 236, 39, 237, 55, 74, 195, 171, 192, 0, 0}, 106, 42},
-	{{179, 10, 70, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 194},
-	{{147, 51, 85, 185, 234, 209, 236, 87, 147, 17, 7, 68, 148, 32, 0, 0}, 107, 237},
-	{{177, 178, 6, 40, 46, 166, 87, 198, 214, 234, 23, 224, 0, 0, 0, 0}, 93, 151},
-	{{201, 53, 40, 20, 49, 4, 38, 139, 133, 217, 214, 134, 89, 200, 0, 0}, 109, 238},
-	{{4, 26, 181, 37, 206, 129, 233, 32, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 128},
-	{{81, 58, 248, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 227},
-	{{18, 238, 250, 161, 57, 246, 208, 118, 14, 76, 73, 25, 65, 22, 152, 120}, 127, 138},
-	{{31, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 60},
-	{{115, 195, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 148},
-	{{116, 22, 75, 33, 16, 129, 35, 124, 10, 112, 31, 213, 181, 108, 177, 46}, 128, 129},
-	{{117, 214, 20, 80, 83, 51, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 202},
-	{{120, 75, 124, 149, 120, 123, 242, 151, 181, 164, 128, 0, 0, 0, 0, 0}, 81, 88},
-	{{87, 238, 168, 62, 88, 166, 52, 104, 219, 169, 93, 128, 0, 0, 0, 0}, 90, 3},
-	{{237, 44, 224, 146, 52, 85, 245, 192, 65, 137, 37, 95, 156, 176, 0, 0}, 108, 243},
-	{{214, 241, 51, 63, 73, 61, 193, 165, 23, 108, 0, 0, 0, 0, 0, 0}, 80, 95},
-	{{87, 242, 21, 157, 45, 188, 36, 62, 66, 243, 64, 0, 0, 0, 0, 0}, 87, 255},
-	{{0, 97, 220, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 48},
-	{{227, 206, 189, 31, 222, 8, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 38},
-	{{174, 27, 0, 16, 13, 150, 33, 122, 154, 59, 236, 35, 248, 178, 64, 0}, 115, 20},
-	{{39, 20, 125, 69, 252, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 43, 41},
-	{{141, 232, 1, 12, 125, 229, 168, 14, 125, 116, 180, 0, 0, 0, 0, 0}, 92, 133},
-	{{93, 238, 40, 228, 254, 203, 251, 6, 60, 82, 243, 242, 0, 0, 0, 0}, 95, 189},
-	{{44, 115, 200, 17, 146, 223, 115, 253, 126, 206, 152, 90, 0, 0, 0, 0}, 95, 151},
-	{{213, 58, 235, 255, 6, 163, 61, 10, 224, 0, 0, 0, 0, 0, 0, 0}, 68, 100},
-	{{25, 86, 139, 116, 190, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 118},
-	{{113, 40, 65, 141, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 164},
-	{{149, 205, 200, 186, 19, 126, 215, 199, 94, 37, 100, 32, 128, 0, 0, 0}, 98, 71},
-	{{39, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 251},
-	{{81, 87, 80, 173, 163, 166, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 51},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 185},
-	{{140, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 144},
-	{{6, 42, 1, 178, 250, 53, 186, 178, 114, 121, 192, 0, 0, 0, 0, 0}, 84, 51},
-	{{2, 17, 234, 51, 169, 5, 219, 149, 245, 237, 4, 0, 0, 0, 0, 0}, 87, 32},
-	{{112, 187, 173, 17, 229, 171, 225, 170, 8, 0, 0, 0, 0, 0, 0, 0}, 70, 137},
-	{{203, 71, 140, 237, 113, 96, 123, 16, 0, 0, 0, 0, 0, 0, 0, 0}, 60, 2},
-	{{99, 138, 207, 2, 244, 25, 211, 98, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 163},
-	{{114, 42, 98, 246, 252, 48, 233, 118, 63, 226, 157, 226, 192, 0, 0, 0}, 100, 162},
-	{{161, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 192},
-	{{233, 70, 240, 45, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 185},
-	{{28, 123, 31, 176, 235, 229, 169, 192, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 51},
-	{{146, 197, 243, 235, 243, 56, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 93},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 159},
-	{{141, 92, 13, 27, 87, 241, 171, 143, 220, 0, 0, 0, 0, 0, 0, 0}, 72, 189},
-	{{164, 151, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 248},
-	{{35, 188, 248, 79, 39, 151, 232, 215, 248, 245, 185, 144, 78, 102, 173, 128}, 123, 38},
-	{{193, 232, 166, 60, 62, 80, 230, 225, 165, 240, 0, 0, 0, 0, 0, 0}, 76, 167},
-	{{109, 229, 118, 155, 43, 154, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 28},
-	{{160, 62, 63, 212, 218, 138, 154, 108, 163, 127, 197, 237, 183, 44, 140, 192}, 125, 37},
-	{{196, 37, 51, 146, 26, 85, 53, 31, 216, 141, 52, 218, 153, 32, 0, 0}, 107, 234},
-	{{228, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 70},
-	{{154, 248, 20, 242, 154, 244, 63, 17, 121, 52, 70, 84, 118, 208, 0, 0}, 108, 50},
-	{{41, 100, 27, 84, 106, 112, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 171},
-	{{81, 99, 197, 139, 30, 150, 230, 216, 81, 190, 84, 165, 29, 64, 128, 0}, 113, 236},
-	{{112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 3},
-	{{164, 119, 253, 126, 160, 249, 183, 191, 119, 111, 224, 0, 0, 0, 0, 0}, 86, 64},
-	{{138, 58, 198, 254, 0, 197, 60, 91, 132, 199, 181, 251, 78, 160, 0, 0}, 108, 213},
-	{{209, 89, 168, 236, 146, 169, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 15},
-	{{131, 210, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 20, 145},
-	{{165, 190, 157, 7, 131, 5, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 27},
-	{{179, 226, 57, 204, 187, 70, 52, 81, 119, 162, 229, 42, 47, 185, 9, 162}, 127, 75},
-	{{98, 235, 155, 51, 107, 167, 127, 137, 254, 246, 162, 171, 180, 13, 233, 0}, 123, 76},
-	{{107, 79, 76, 90, 94, 151, 155, 31, 33, 115, 19, 204, 98, 115, 0, 0}, 113, 247},
-	{{143, 46, 30, 175, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 43, 121},
-	{{155, 85, 217, 180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 214},
-	{{58, 62, 156, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 221},
-	{{92, 155, 53, 3, 39, 108, 155, 200, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 102},
-	{{64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 191},
-	{{63, 134, 251, 59, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 197},
-	{{234, 149, 220, 106, 0, 144, 214, 128, 35, 102, 0, 0, 0, 0, 0, 0}, 79, 106},
+	{RTE_IPV6(0x4246, 0x9a8f, 0xc5e9, 0, 0, 0, 0, 0), 50, 146},
+	{RTE_IPV6(0x6b4f, 0x12eb, 0x8e54, 0x5000, 0, 0, 0, 0), 54, 141},
+	{RTE_IPV6(0xf784, 0x7101, 0xd7f7, 0xb7ef, 0x8000, 0, 0, 0), 67, 23},
+	{RTE_IPV6(0x3013, 0x290c, 0x4c65, 0x72a0, 0x2d67, 0x8692, 0x8000, 0), 97, 252},
+	{RTE_IPV6(0x0546, 0xd0aa, 0x1300, 0x7400, 0, 0, 0, 0), 54, 6},
+	{RTE_IPV6(0x0100, 0, 0, 0, 0, 0, 0, 0), 9, 137},
+	{RTE_IPV6(0x0cbc, 0x1a12, 0, 0, 0, 0, 0, 0), 31, 9},
+	{RTE_IPV6(0x01eb, 0x65ca, 0x1a5c, 0x1716, 0xb3df, 0x8000, 0, 0), 82, 9},
+	{RTE_IPV6(0xd713, 0xe066, 0x2d85, 0x66f9, 0x3814, 0xd6db, 0x5d7d, 0x3400), 120, 163},
+	{RTE_IPV6(0xb2b7, 0x6d40, 0x8854, 0x0b35, 0xd966, 0, 0, 0), 79, 197},
+	{RTE_IPV6(0xd427, 0x9e47, 0xfd62, 0xf800, 0, 0, 0, 0), 54, 249},
+	{RTE_IPV6(0x5c3a, 0x9f82, 0x6938, 0, 0, 0, 0, 0), 47, 88},
+	{RTE_IPV6(0x768c, 0x41c6, 0xd45d, 0x9000, 0, 0, 0, 0), 52, 104},
+	{RTE_IPV6(0x5640, 0, 0, 0, 0, 0, 0, 0), 10, 36},
+	{RTE_IPV6(0x4f87, 0xf2c1, 0xc50b, 0xc800, 0, 0, 0, 0), 54, 239},
+	{RTE_IPV6(0xa3e4, 0xef50, 0x2942, 0xb0b0, 0, 0, 0, 0), 67, 201},
+	{RTE_IPV6(0x1f09, 0xe72a, 0, 0, 0, 0, 0, 0), 33, 94},
+	{RTE_IPV6(0x6c90, 0xcd27, 0xd71a, 0x6000, 0, 0, 0, 0), 51, 241},
+	{RTE_IPV6(0xf7d9, 0xac00, 0, 0, 0, 0, 0, 0), 24, 239},
+	{RTE_IPV6(0x18ba, 0x49b6, 0xf0fb, 0x7da5, 0, 0, 0, 0), 66, 151},
+	{RTE_IPV6(0xf570, 0, 0, 0, 0, 0, 0, 0), 12, 137},
+	{RTE_IPV6(0x2c5e, 0x8ae0, 0xa800, 0, 0, 0, 0, 0), 41, 231},
+	{RTE_IPV6(0xb8dd, 0x6d87, 0xe120, 0, 0, 0, 0, 0), 44, 11},
+	{RTE_IPV6(0x33b3, 0x88b8, 0x1e76, 0x1810, 0x1aa1, 0xce65, 0, 0), 96, 20},
+	{RTE_IPV6(0x302e, 0, 0, 0, 0, 0, 0, 0), 15, 68},
+	{RTE_IPV6(0x8feb, 0xeddc, 0x5977, 0xbb8f, 0xd15e, 0x2e3a, 0x7800, 0), 101, 64},
+	{RTE_IPV6(0x79be, 0x5ab1, 0x8000, 0, 0, 0, 0, 0), 33, 152},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 6, 217},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 2, 101},
+	{RTE_IPV6(0x6fd6, 0, 0, 0, 0, 0, 0, 0), 15, 58},
+	{RTE_IPV6(0xa217, 0x3440, 0, 0, 0, 0, 0, 0), 27, 254},
+	{RTE_IPV6(0x4c67, 0x2c4f, 0, 0, 0, 0, 0, 0), 32, 148},
+	{RTE_IPV6(0x5055, 0xdbd6, 0x0c04, 0x4181, 0xa294, 0xd04e, 0x2745, 0x5eb8), 126, 126},
+	{RTE_IPV6(0x5036, 0xfb1c, 0x9817, 0xf4c0, 0x9753, 0x0690, 0xdfd5, 0xe080), 123, 76},
+	{RTE_IPV6(0x27e8, 0xed67, 0xbfbc, 0x2400, 0, 0, 0, 0), 54, 240},
+	{RTE_IPV6(0x14e7, 0x59d2, 0xa7ad, 0x5000, 0, 0, 0, 0), 54, 33},
+	{RTE_IPV6(0x7d43, 0xc680, 0, 0, 0, 0, 0, 0), 25, 47},
+	{RTE_IPV6(0x1aef, 0x9905, 0xd579, 0x1f72, 0xa12e, 0x540f, 0x94a0, 0), 109, 41},
+	{RTE_IPV6(0x66d4, 0x9f76, 0xdf73, 0x86ac, 0, 0, 0, 0), 62, 72},
+	{RTE_IPV6(0x55b5, 0xf17f, 0x032c, 0, 0, 0, 0, 0), 46, 43},
+	{RTE_IPV6(0x3dc7, 0x83e2, 0x03e6, 0x5e77, 0xf000, 0, 0, 0), 68, 26},
+	{RTE_IPV6(0x008f, 0xa0b8, 0xa2c0, 0, 0, 0, 0, 0), 42, 139},
+	{RTE_IPV6(0xaa18, 0, 0, 0, 0, 0, 0, 0), 13, 219},
+	{RTE_IPV6(0x3d7a, 0x18fb, 0x7c7a, 0xcac0, 0, 0, 0, 0), 58, 105},
+	{RTE_IPV6(0x21db, 0xe203, 0xb4be, 0, 0, 0, 0, 0), 47, 210},
+	{RTE_IPV6(0x33fb, 0, 0, 0, 0, 0, 0, 0), 17, 151},
+	{RTE_IPV6(0x6ab9, 0x0b7a, 0xc5c0, 0, 0, 0, 0, 0), 42, 28},
+	{RTE_IPV6(0xc000, 0, 0, 0, 0, 0, 0, 0), 9, 64},
+	{RTE_IPV6(0xefc3, 0x4def, 0x839c, 0x02f6, 0xbfb2, 0xcca0, 0x15d5, 0x1e80), 121, 9},
+	{RTE_IPV6(0x8dcf, 0xb563, 0x37f5, 0x97e4, 0x4132, 0x5510, 0, 0), 92, 250},
+	{RTE_IPV6(0x6e9f, 0xe6fb, 0xe0d2, 0x3a31, 0, 0, 0, 0), 66, 200},
+	{RTE_IPV6(0x861a, 0x6820, 0x8129, 0xc932, 0xa445, 0xb29c, 0x9c85, 0x08da), 127, 132},
+	{RTE_IPV6(0xfdcf, 0x7469, 0xd2a6, 0xba63, 0xb600, 0, 0, 0), 71, 182},
+	{RTE_IPV6(0xd349, 0x2650, 0xb7a8, 0x348a, 0x19d6, 0x7008, 0xfc00, 0), 102, 7},
+	{RTE_IPV6(0xc8f4, 0x6cee, 0xa48d, 0xd727, 0xe9f9, 0x7850, 0x7000, 0), 100, 146},
+	{RTE_IPV6(0x6b2c, 0xfaca, 0x4025, 0x6b69, 0x8c00, 0, 0, 0), 70, 98},
+	{RTE_IPV6(0x5d56, 0x381b, 0x9fc3, 0x7e27, 0xf0c9, 0x3000, 0, 0), 86, 179},
+	{RTE_IPV6(0x20ca, 0xd6f2, 0x278d, 0x3d92, 0x8a60, 0, 0, 0), 77, 245},
+	{RTE_IPV6(0xa74d, 0xf91c, 0xd2c4, 0xe3f1, 0, 0, 0, 0), 64, 2},
+	{RTE_IPV6(0xf13b, 0x8000, 0, 0, 0, 0, 0, 0), 17, 5},
+	{RTE_IPV6(0x8f44, 0x92d2, 0xad9b, 0xfbad, 0, 0, 0, 0), 66, 169},
+	{RTE_IPV6(0xa7b4, 0xe290, 0, 0, 0, 0, 0, 0), 33, 52},
+	{RTE_IPV6(0xf100, 0, 0, 0, 0, 0, 0, 0), 9, 177},
+	{RTE_IPV6(0xee09, 0xa860, 0, 0, 0, 0, 0, 0), 27, 74},
+	{RTE_IPV6(0xcb94, 0x1060, 0x7d12, 0x5601, 0x5bf4, 0xfb14, 0x1f0e, 0x4b80), 122, 212},
+	{RTE_IPV6(0x6fe3, 0x895e, 0x4115, 0x4d89, 0x7782, 0x9f13, 0x9f2d, 0x12c0), 122, 238},
+	{RTE_IPV6(0x3b90, 0, 0, 0, 0, 0, 0, 0), 19, 18},
+	{RTE_IPV6(0x6ec0, 0xff78, 0x54d7, 0x0382, 0x26e0, 0, 0, 0), 75, 155},
+	{RTE_IPV6(0x984f, 0xdb00, 0, 0, 0, 0, 0, 0), 24, 97},
+	{RTE_IPV6(0x76ba, 0x9df8, 0, 0, 0, 0, 0, 0), 32, 8},
+	{RTE_IPV6(0x4680, 0, 0, 0, 0, 0, 0, 0), 9, 123},
+	{RTE_IPV6(0xfd77, 0x72e3, 0x12f3, 0x513d, 0xee6b, 0xbe90, 0, 0), 92, 11},
+	{RTE_IPV6(0xa670, 0, 0, 0, 0, 0, 0, 0), 13, 211},
+	{RTE_IPV6(0x2b5f, 0xe000, 0, 0, 0, 0, 0, 0), 20, 116},
+	{RTE_IPV6(0x5e80, 0, 0, 0, 0, 0, 0, 0), 11, 57},
+	{RTE_IPV6(0xb6fb, 0xc384, 0x4207, 0xd092, 0xdfe7, 0xd3b5, 0x19b0, 0), 108, 178},
+	{RTE_IPV6(0x98a6, 0x6fe9, 0xc211, 0xe629, 0xddfd, 0x457b, 0x6c00, 0), 102, 93},
+	{RTE_IPV6(0x6a8d, 0xebbe, 0x52f1, 0x98ba, 0xc351, 0x5690, 0, 0), 92, 3},
+	{RTE_IPV6(0x2051, 0xd299, 0x971d, 0x0b3e, 0x7fb1, 0xc2fe, 0x6753, 0x3a80), 121, 162},
+	{RTE_IPV6(0x4f70, 0xe01a, 0xae27, 0x62b5, 0x7339, 0xd1bd, 0x8830, 0), 109, 125},
+	{RTE_IPV6(0x6ac5, 0x5397, 0x4000, 0, 0, 0, 0, 0), 34, 33},
+	{RTE_IPV6(0xbe00, 0, 0, 0, 0, 0, 0, 0), 9, 254},
+	{RTE_IPV6(0x9c49, 0xf994, 0x37c0, 0x142a, 0x8e80, 0, 0, 0), 74, 66},
+	{RTE_IPV6(0x406b, 0x2478, 0, 0, 0, 0, 0, 0), 30, 4},
+	{RTE_IPV6(0x7394, 0x47fa, 0x9eae, 0xa8f9, 0x6a6e, 0xc400, 0, 0), 86, 122},
+	{RTE_IPV6(0x128b, 0x982c, 0x2658, 0, 0, 0, 0, 0), 46, 59},
+	{RTE_IPV6(0x37e5, 0x756a, 0x925f, 0x4adc, 0x7a00, 0x54ca, 0xb78a, 0x7800), 117, 99},
+	{RTE_IPV6(0x99d3, 0x0360, 0, 0, 0, 0, 0, 0), 27, 41},
+	{RTE_IPV6(0x0100, 0, 0, 0, 0, 0, 0, 0), 8, 112},
+	{RTE_IPV6(0x31c0, 0x668e, 0xd803, 0x7240, 0xa580, 0xa800, 0, 0), 85, 255},
+	{RTE_IPV6(0xc98f, 0xf0f0, 0xd1e0, 0, 0, 0, 0, 0), 44, 106},
+	{RTE_IPV6(0x9e13, 0xa4c4, 0x57a2, 0x2178, 0, 0, 0, 0), 62, 170},
+	{RTE_IPV6(0x0572, 0, 0, 0, 0, 0, 0, 0), 16, 86},
+	{RTE_IPV6(0x22aa, 0xf63e, 0xc655, 0xc1e3, 0xfc44, 0, 0, 0), 79, 155},
+	{RTE_IPV6(0x1534, 0x0956, 0xe000, 0, 0, 0, 0, 0), 35, 65},
+	{RTE_IPV6(0xcb51, 0x31ab, 0xe000, 0, 0, 0, 0, 0), 36, 39},
+	{RTE_IPV6(0xd3da, 0x57f4, 0x5db5, 0x7629, 0x9c8f, 0xfe00, 0, 0), 90, 162},
+	{RTE_IPV6(0x4d40, 0, 0, 0, 0, 0, 0, 0), 10, 69},
+	{RTE_IPV6(0x9edb, 0xdb27, 0x04db, 0x643f, 0, 0, 0, 0), 64, 163},
+	{RTE_IPV6(0x3d32, 0xe801, 0xb9fc, 0xf336, 0xbdf0, 0xaac0, 0, 0), 90, 116},
+	{RTE_IPV6(0xf18f, 0x2113, 0xf737, 0x2000, 0, 0, 0, 0), 53, 19},
+	{RTE_IPV6(0x3d1c, 0x3dfc, 0x2000, 0, 0, 0, 0, 0), 36, 48},
+	{RTE_IPV6(0x6670, 0xc26c, 0x5afd, 0x8000, 0, 0, 0, 0), 49, 230},
+	{RTE_IPV6(0x4a58, 0x3a42, 0xac29, 0x90cc, 0xc3f0, 0, 0, 0), 78, 155},
+	{RTE_IPV6(0x2c94, 0xbb3a, 0xbe3b, 0xbebb, 0x7c8a, 0xde83, 0, 0), 96, 158},
+	{RTE_IPV6(0x4307, 0xd88b, 0x5de0, 0x1487, 0xba56, 0xd16f, 0x3c50, 0), 113, 252},
+	{RTE_IPV6(0xd11a, 0x0cae, 0x0565, 0xa4b5, 0xed3f, 0xc039, 0x3678, 0), 110, 176},
+	{RTE_IPV6(0x0442, 0xe834, 0xef38, 0x303a, 0xc000, 0, 0, 0), 66, 211},
+	{RTE_IPV6(0x9ea5, 0x0290, 0, 0, 0, 0, 0, 0), 28, 15},
+	{RTE_IPV6(0x55cc, 0xf5c6, 0x442c, 0x2747, 0x2000, 0, 0, 0), 68, 95},
+	{RTE_IPV6(0xb586, 0x1957, 0x8000, 0, 0, 0, 0, 0), 34, 169},
+	{RTE_IPV6(0x1ae6, 0x3d24, 0x4fc0, 0, 0, 0, 0, 0), 44, 249},
+	{RTE_IPV6(0x05aa, 0xc68b, 0x41ba, 0xbc2d, 0x2afd, 0xa559, 0xce00, 0), 105, 61},
+	{RTE_IPV6(0xd3f5, 0x3e00, 0, 0, 0, 0, 0, 0), 23, 63},
+	{RTE_IPV6(0x7544, 0, 0, 0, 0, 0, 0, 0), 14, 43},
+	{RTE_IPV6(0x6711, 0x7b66, 0x46ce, 0x5a5c, 0x7cc6, 0, 0, 0), 81, 228},
+	{RTE_IPV6(0xc0ed, 0x58f4, 0x351e, 0x3da0, 0x8f40, 0, 0, 0), 78, 165},
+	{RTE_IPV6(0xc752, 0xd9b7, 0x02b3, 0xc306, 0, 0, 0, 0), 64, 3},
+	{RTE_IPV6(0x9de6, 0x4fa2, 0x397d, 0x9800, 0, 0, 0, 0), 57, 211},
+	{RTE_IPV6(0x1b43, 0x40eb, 0x8000, 0, 0, 0, 0, 0), 33, 210},
+	{RTE_IPV6(0x489e, 0xa36a, 0xc189, 0xbe07, 0xfaa5, 0xf949, 0x4000, 0), 99, 61},
+	{RTE_IPV6(0x22c0, 0, 0, 0, 0, 0, 0, 0), 10, 120},
+	{RTE_IPV6(0xd78d, 0x5fc0, 0xbd3e, 0, 0, 0, 0, 0), 47, 94},
+	{RTE_IPV6(0x1fb5, 0x388d, 0x7880, 0, 0, 0, 0, 0), 41, 153},
+	{RTE_IPV6(0x9949, 0x8000, 0, 0, 0, 0, 0, 0), 18, 221},
+	{RTE_IPV6(0xa26b, 0x29bd, 0xa59b, 0x168b, 0xa548, 0x6000, 0, 0), 87, 163},
+	{RTE_IPV6(0xda11, 0xcca5, 0xd9fb, 0x6b2d, 0x1d0f, 0xc0a7, 0x4b00, 0), 106, 188},
+	{RTE_IPV6(0xc87c, 0xeed5, 0x23e4, 0x5e8d, 0x56bb, 0x653c, 0x7334, 0x8310), 124, 15},
+	{RTE_IPV6(0x4aed, 0xa038, 0x8dd9, 0xbf10, 0, 0, 0, 0), 63, 28},
+	{RTE_IPV6(0xa32f, 0xf267, 0xadd9, 0x589a, 0x26c8, 0x2000, 0, 0), 84, 240},
+	{RTE_IPV6(0x14e3, 0x801c, 0x9093, 0x160d, 0x5e81, 0x6b58, 0, 0), 93, 59},
+	{RTE_IPV6(0x5f90, 0xe56b, 0xda7d, 0xcce9, 0xa12a, 0xb440, 0, 0), 90, 195},
+	{RTE_IPV6(0x9bdc, 0x53d0, 0x6c10, 0x869c, 0x8000, 0, 0, 0), 66, 10},
+	{RTE_IPV6(0xb38a, 0x3750, 0xbe99, 0x0ced, 0x1678, 0x4500, 0, 0), 88, 206},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 2, 137},
+	{RTE_IPV6(0x0377, 0x9400, 0, 0, 0, 0, 0, 0), 22, 225},
+	{RTE_IPV6(0x0dc0, 0, 0, 0, 0, 0, 0, 0), 10, 223},
+	{RTE_IPV6(0x751c, 0, 0, 0, 0, 0, 0, 0), 15, 29},
+	{RTE_IPV6(0xa413, 0xc32f, 0x88be, 0x9cff, 0x1e4a, 0x8f86, 0xa200, 0), 103, 166},
+	{RTE_IPV6(0x28eb, 0x5e87, 0x87e6, 0x4721, 0x40e9, 0, 0, 0), 80, 178},
+	{RTE_IPV6(0xde97, 0xa661, 0x81fa, 0x8c00, 0, 0, 0, 0), 55, 38},
+	{RTE_IPV6(0xae80, 0, 0, 0, 0, 0, 0, 0), 9, 141},
+	{RTE_IPV6(0x06bd, 0x6496, 0xfa0d, 0x2e62, 0xe48b, 0x3234, 0x34c4, 0x8000), 116, 230},
+	{RTE_IPV6(0x4bfc, 0x59cd, 0x2534, 0x6a4f, 0xbc78, 0x3677, 0xa000, 0), 99, 124},
+	{RTE_IPV6(0x2612, 0x9206, 0x3f40, 0xe70a, 0x98c7, 0x058f, 0x9304, 0xfc00), 118, 54},
+	{RTE_IPV6(0x6f77, 0xa933, 0, 0, 0, 0, 0, 0), 32, 162},
+	{RTE_IPV6(0x6980, 0, 0, 0, 0, 0, 0, 0), 13, 32},
+	{RTE_IPV6(0x8f39, 0x3965, 0x62b6, 0x4ae3, 0xcd8f, 0xfded, 0x0800, 0), 102, 237},
+	{RTE_IPV6(0x1e00, 0, 0, 0, 0, 0, 0, 0), 7, 215},
+	{RTE_IPV6(0x0ee8, 0x3000, 0, 0, 0, 0, 0, 0), 22, 138},
+	{RTE_IPV6(0x0e35, 0x43d8, 0xe59b, 0x958b, 0x1ffd, 0xb87e, 0x856c, 0x2800), 118, 73},
+	{RTE_IPV6(0x163a, 0x288f, 0xbc84, 0xef0e, 0xb5fc, 0x51c0, 0, 0), 90, 43},
+	{RTE_IPV6(0x0bde, 0xb9f3, 0xf896, 0x4fe6, 0xd6d5, 0x0317, 0xc1c4, 0), 112, 88},
+	{RTE_IPV6(0x0ee2, 0xc675, 0x545d, 0x1660, 0x4df1, 0xad44, 0x44cc, 0x4800), 119, 91},
+	{RTE_IPV6(0x0f67, 0xf7db, 0x968e, 0x5c32, 0x9000, 0, 0, 0), 69, 140},
+	{RTE_IPV6(0x00d5, 0x4df4, 0x4000, 0, 0, 0, 0, 0), 37, 65},
+	{RTE_IPV6(0xb2ae, 0xaeef, 0x48b5, 0x24d9, 0x28a9, 0x0c68, 0x959d, 0x7d80), 122, 201},
+	{RTE_IPV6(0x7635, 0x3711, 0x61e3, 0xf3b0, 0x0200, 0, 0, 0), 72, 69},
+	{RTE_IPV6(0x15fd, 0x042f, 0, 0, 0, 0, 0, 0), 35, 170},
+	{RTE_IPV6(0x05f9, 0xba85, 0x4400, 0, 0, 0, 0, 0), 40, 192},
+	{RTE_IPV6(0x2f4f, 0x2342, 0x0bb2, 0xa11c, 0x57b4, 0x2d80, 0, 0), 89, 21},
+	{RTE_IPV6(0xf2e3, 0x1449, 0x96c4, 0, 0, 0, 0, 0), 46, 35},
+	{RTE_IPV6(0x79a9, 0x6676, 0x9dc0, 0x9aba, 0x7e00, 0, 0, 0), 71, 235},
+	{RTE_IPV6(0x098a, 0xc000, 0, 0, 0, 0, 0, 0), 21, 240},
+	{RTE_IPV6(0x2dad, 0x0e48, 0, 0, 0, 0, 0, 0), 30, 136},
+	{RTE_IPV6(0x7f2f, 0x33c9, 0xec2d, 0x8e50, 0, 0, 0, 0), 60, 186},
+	{RTE_IPV6(0xf7e9, 0x2226, 0xb5cf, 0x7f14, 0xe076, 0x3b94, 0, 0), 95, 174},
+	{RTE_IPV6(0x7ebb, 0xc668, 0xf5df, 0xdb12, 0x1f7c, 0, 0, 0), 79, 153},
+	{RTE_IPV6(0x03a3, 0x6be4, 0xc000, 0, 0, 0, 0, 0), 35, 118},
+	{RTE_IPV6(0xa76d, 0x025f, 0x0b3e, 0x2d80, 0, 0, 0, 0), 60, 113},
+	{RTE_IPV6(0x4c00, 0, 0, 0, 0, 0, 0, 0), 6, 58},
+	{RTE_IPV6(0x3abe, 0xcc97, 0xde93, 0x2f4e, 0x26cb, 0x0911, 0x4000, 0), 101, 206},
+	{RTE_IPV6(0xfedc, 0xfedc, 0xcc4f, 0x237f, 0xf23f, 0x6ae8, 0x7fb4, 0), 111, 42},
+	{RTE_IPV6(0x4d9c, 0x08d1, 0xb525, 0x4600, 0, 0, 0, 0), 55, 230},
+	{RTE_IPV6(0x4159, 0x894c, 0xd0c7, 0xa65a, 0x8000, 0, 0, 0), 67, 6},
+	{RTE_IPV6(0x2fe8, 0, 0, 0, 0, 0, 0, 0), 13, 254},
+	{RTE_IPV6(0xac9a, 0x0c6c, 0x4d25, 0x6a08, 0xea07, 0xf8d4, 0x70a0, 0), 108, 214},
+	{RTE_IPV6(0xfe75, 0xeff4, 0x9a59, 0xa6f1, 0x0c6c, 0x7f99, 0xcea0, 0), 107, 43},
+	{RTE_IPV6(0x71a0, 0xce34, 0x8f0c, 0x0994, 0xe000, 0, 0, 0), 67, 178},
+	{RTE_IPV6(0xb282, 0, 0, 0, 0, 0, 0, 0), 16, 179},
+	{RTE_IPV6(0xe5b1, 0x1c6a, 0x3b4b, 0xb6f1, 0x244f, 0xe000, 0, 0), 87, 236},
+	{RTE_IPV6(0x9c48, 0x5dc1, 0x32eb, 0x4be4, 0x5873, 0x5977, 0x8000, 0), 98, 184},
+	{RTE_IPV6(0x1ce8, 0x1cf9, 0x5369, 0xd307, 0x8893, 0xe740, 0, 0), 91, 95},
+	{RTE_IPV6(0xd921, 0x176b, 0x4a2a, 0x87c5, 0x9022, 0x28f3, 0x0d7e, 0x2488), 127, 152},
+	{RTE_IPV6(0x4000, 0, 0, 0, 0, 0, 0, 0), 2, 113},
+	{RTE_IPV6(0x55ac, 0x797e, 0xd539, 0xe136, 0xc549, 0x55fb, 0x0940, 0), 108, 137},
+	{RTE_IPV6(0x682e, 0x1947, 0x56dc, 0, 0, 0, 0, 0), 46, 224},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 6, 61},
+	{RTE_IPV6(0xf171, 0xfe6a, 0x3580, 0, 0, 0, 0, 0), 41, 205},
+	{RTE_IPV6(0x1d24, 0x0cf4, 0xc57f, 0xf008, 0xa786, 0x9af8, 0xc77b, 0x8ff0), 124, 170},
+	{RTE_IPV6(0x3a1d, 0x815e, 0x2b8b, 0xe000, 0, 0, 0, 0), 53, 117},
+	{RTE_IPV6(0xd57c, 0x93c4, 0x0752, 0x4346, 0xe400, 0, 0, 0), 70, 225},
+	{RTE_IPV6(0xa4a8, 0xa18c, 0x5755, 0xfa29, 0x2200, 0, 0, 0), 72, 34},
+	{RTE_IPV6(0xba8e, 0x8000, 0, 0, 0, 0, 0, 0), 17, 5},
+	{RTE_IPV6(0xedf9, 0x0946, 0xf761, 0x4000, 0, 0, 0, 0), 50, 92},
+	{RTE_IPV6(0x9b5c, 0x91da, 0x7de2, 0xe200, 0, 0, 0, 0), 55, 230},
+	{RTE_IPV6(0x23a9, 0x3e9c, 0x5604, 0x7ddb, 0x7771, 0xbf4b, 0xc671, 0), 112, 61},
+	{RTE_IPV6(0xcf3f, 0x60ba, 0x1a44, 0x73a1, 0xa33b, 0xbea6, 0x124e, 0xe800), 117, 221},
+	{RTE_IPV6(0x5628, 0xc8c7, 0xf756, 0x9fb3, 0xbfb8, 0x75ad, 0xd39e, 0x0080), 121, 105},
+	{RTE_IPV6(0x6840, 0, 0, 0, 0, 0, 0, 0), 11, 181},
+	{RTE_IPV6(0xcd23, 0x7bb2, 0x2440, 0x3e99, 0xc3fa, 0, 0, 0), 79, 110},
+	{RTE_IPV6(0x7528, 0x399d, 0x8aa0, 0xdf3b, 0x9b91, 0x4000, 0, 0), 86, 103},
+	{RTE_IPV6(0x4aa6, 0x8c92, 0x4a48, 0xe563, 0xa77c, 0x6b75, 0xd90e, 0xf640), 123, 218},
+	{RTE_IPV6(0x0cde, 0xf4b7, 0x5392, 0x2a00, 0, 0, 0, 0), 56, 146},
+	{RTE_IPV6(0x0b62, 0x926e, 0x5f60, 0x508e, 0xe000, 0, 0, 0), 67, 90},
+	{RTE_IPV6(0xeb05, 0xbbc7, 0x1eaa, 0x52bb, 0xe49f, 0x1619, 0xcc70, 0), 108, 197},
+	{RTE_IPV6(0x2360, 0x9291, 0x9b74, 0xfcb5, 0x1dcd, 0xe6f6, 0x1e00, 0), 103, 158},
+	{RTE_IPV6(0xae26, 0x38f4, 0xe366, 0xfced, 0x8056, 0, 0, 0), 81, 118},
+	{RTE_IPV6(0x4186, 0x253a, 0x5a7d, 0x3c54, 0, 0, 0, 0), 62, 95},
+	{RTE_IPV6(0xfd75, 0x8762, 0x8000, 0, 0, 0, 0, 0), 33, 152},
+	{RTE_IPV6(0x6f73, 0xbcb8, 0x8200, 0, 0, 0, 0, 0), 45, 239},
+	{RTE_IPV6(0xca18, 0x5909, 0x952d, 0x4000, 0, 0, 0, 0), 50, 48},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 5, 228},
+	{RTE_IPV6(0xf462, 0x348c, 0, 0, 0, 0, 0, 0), 30, 247},
+	{RTE_IPV6(0x97a7, 0x2bb2, 0x74c2, 0xad7e, 0xec62, 0x2800, 0, 0), 85, 12},
+	{RTE_IPV6(0x3c40, 0, 0, 0, 0, 0, 0, 0), 10, 129},
+	{RTE_IPV6(0xd000, 0, 0, 0, 0, 0, 0, 0), 4, 50},
+	{RTE_IPV6(0x7e0b, 0xd8f2, 0x072d, 0x79d0, 0x6e87, 0xd24b, 0x3bb6, 0xe42a), 128, 250},
+	{RTE_IPV6(0xd91a, 0xb892, 0x0312, 0xf00f, 0x8708, 0, 0, 0), 77, 249},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 1, 230},
+	{RTE_IPV6(0x911c, 0x1db8, 0x0255, 0xea87, 0x626f, 0x8820, 0, 0), 92, 228},
+	{RTE_IPV6(0x6c68, 0xfffe, 0x225f, 0x489d, 0, 0, 0, 0), 64, 181},
+	{RTE_IPV6(0x993d, 0, 0, 0, 0, 0, 0, 0), 16, 206},
+	{RTE_IPV6(0x16fa, 0x82c9, 0x84f8, 0xbd6c, 0, 0, 0, 0), 63, 122},
+	{RTE_IPV6(0x9ea5, 0xea12, 0x2c3d, 0x523d, 0xeb00, 0, 0, 0), 72, 81},
+	{RTE_IPV6(0xec39, 0x7c6e, 0x7cda, 0x5246, 0x8e4e, 0x1280, 0, 0), 95, 175},
+	{RTE_IPV6(0x5ed1, 0xc8c9, 0x95a2, 0xf886, 0xefe2, 0x01ed, 0x1086, 0x3800), 118, 170},
+	{RTE_IPV6(0xbb2a, 0x1f90, 0xec46, 0, 0, 0, 0, 0), 47, 174},
+	{RTE_IPV6(0x5ad6, 0xb980, 0, 0, 0, 0, 0, 0), 29, 104},
+	{RTE_IPV6(0xc2dc, 0xd3d4, 0xd320, 0xc462, 0x473e, 0x9967, 0x5023, 0x8000), 114, 113},
+	{RTE_IPV6(0x18ff, 0x9e40, 0xb494, 0x0a51, 0xf3f7, 0, 0, 0), 80, 89},
+	{RTE_IPV6(0xe79b, 0x64f2, 0x70a0, 0xa05f, 0x62fd, 0xdb15, 0xef5a, 0), 113, 151},
+	{RTE_IPV6(0xe160, 0, 0, 0, 0, 0, 0, 0), 11, 108},
+	{RTE_IPV6(0x8800, 0, 0, 0, 0, 0, 0, 0), 7, 224},
+	{RTE_IPV6(0xfa80, 0, 0, 0, 0, 0, 0, 0), 9, 95},
+	{RTE_IPV6(0x48a8, 0, 0, 0, 0, 0, 0, 0), 14, 173},
+	{RTE_IPV6(0xb933, 0x33a7, 0x122c, 0x243b, 0x2387, 0x1468, 0, 0), 93, 176},
+	{RTE_IPV6(0x3992, 0xfc3c, 0xc544, 0x27a2, 0x50c6, 0x8932, 0x615c, 0x7c00), 119, 84},
+	{RTE_IPV6(0xfe2e, 0xf269, 0x565e, 0x600e, 0x82b0, 0, 0, 0), 78, 104},
+	{RTE_IPV6(0xf7ca, 0xb04c, 0x4528, 0, 0, 0, 0, 0), 49, 236},
+	{RTE_IPV6(0x32e9, 0xcb4d, 0x2a15, 0x73a3, 0xa68a, 0xc034, 0xb225, 0x7000), 116, 153},
+	{RTE_IPV6(0x3ec0, 0, 0, 0, 0, 0, 0, 0), 11, 190},
+	{RTE_IPV6(0x3560, 0, 0, 0, 0, 0, 0, 0), 13, 202},
+	{RTE_IPV6(0xc600, 0, 0, 0, 0, 0, 0, 0), 8, 54},
+	{RTE_IPV6(0xbdea, 0x6af7, 0x4000, 0, 0, 0, 0, 0), 34, 156},
+	{RTE_IPV6(0x6e18, 0xe441, 0xd893, 0x0930, 0x3cb3, 0xac5b, 0x73b9, 0xe360), 126, 245},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 6, 218},
+	{RTE_IPV6(0x4ab1, 0x59da, 0xf812, 0xb027, 0x76ad, 0xc998, 0, 0), 93, 72},
+	{RTE_IPV6(0x1f0d, 0x995c, 0x1b7a, 0x96e8, 0x585f, 0xcaab, 0xd09e, 0), 112, 183},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 2, 183},
+	{RTE_IPV6(0x3f25, 0x2e9e, 0x8b80, 0, 0, 0, 0, 0), 42, 241},
+	{RTE_IPV6(0x35d1, 0x3b0d, 0xca46, 0, 0, 0, 0, 0), 47, 106},
+	{RTE_IPV6(0xb82c, 0x95dd, 0xb400, 0, 0, 0, 0, 0), 40, 180},
+	{RTE_IPV6(0xde86, 0x253e, 0xdfc1, 0x27f6, 0x0f97, 0xc892, 0, 0), 96, 142},
+	{RTE_IPV6(0xc7b0, 0xbd25, 0xe9b1, 0xfcd8, 0x5eaf, 0xfd77, 0x6000, 0), 100, 6},
+	{RTE_IPV6(0x2cc3, 0xc96a, 0xd178, 0x7a26, 0x2b1e, 0x8e16, 0xc4af, 0x6400), 118, 33},
+	{RTE_IPV6(0x21a6, 0x0aae, 0x4000, 0, 0, 0, 0, 0), 34, 224},
+	{RTE_IPV6(0x3601, 0xbdc3, 0x8531, 0x2450, 0x8ac8, 0, 0, 0), 78, 14},
+	{RTE_IPV6(0xf100, 0, 0, 0, 0, 0, 0, 0), 10, 149},
+	{RTE_IPV6(0xdd83, 0x04f7, 0x7059, 0xbb77, 0xdb50, 0x7a9c, 0xd8a0, 0), 108, 131},
+	{RTE_IPV6(0x6614, 0x2e81, 0xcaf7, 0x8101, 0xed47, 0x673a, 0xd92c, 0x0400), 121, 133},
+	{RTE_IPV6(0x6b9c, 0x972c, 0xd762, 0xab7e, 0x5520, 0x2a80, 0, 0), 89, 33},
+	{RTE_IPV6(0x3619, 0x4650, 0, 0, 0, 0, 0, 0), 28, 204},
+	{RTE_IPV6(0x95d3, 0xf20e, 0x70db, 0xb000, 0, 0, 0, 0), 52, 43},
+	{RTE_IPV6(0x5f1a, 0x8fc1, 0x084c, 0x4000, 0, 0, 0, 0), 51, 168},
+	{RTE_IPV6(0x3f66, 0xf4b0, 0, 0, 0, 0, 0, 0), 28, 180},
+	{RTE_IPV6(0x4055, 0x7ce2, 0x3bef, 0x4082, 0x447a, 0x5d4a, 0x2025, 0), 112, 208},
+	{RTE_IPV6(0x715a, 0xfd95, 0x03da, 0x22d7, 0x038f, 0xc040, 0, 0), 90, 25},
+	{RTE_IPV6(0x4be7, 0x2105, 0x0b5e, 0x7568, 0x963c, 0x48a1, 0x6026, 0), 111, 50},
+	{RTE_IPV6(0x340d, 0xf801, 0xfb0e, 0x321d, 0xd47b, 0x82b1, 0x6560, 0), 109, 110},
+	{RTE_IPV6(0xf8dd, 0x9684, 0xfc52, 0x6002, 0x50e8, 0x61ef, 0xfd40, 0), 109, 21},
+	{RTE_IPV6(0x884d, 0xa4a1, 0xc000, 0, 0, 0, 0, 0), 36, 147},
+	{RTE_IPV6(0x0121, 0x42fe, 0x9080, 0, 0, 0, 0, 0), 43, 56},
+	{RTE_IPV6(0xb519, 0xbae1, 0x6dbe, 0x4c9e, 0x767a, 0x1440, 0x7d37, 0x0800), 117, 144},
+	{RTE_IPV6(0xbfbb, 0xa08c, 0x1106, 0x5078, 0xecd4, 0x6890, 0x8000, 0), 100, 198},
+	{RTE_IPV6(0xc93d, 0x96fe, 0x464d, 0xd6d3, 0xaba3, 0xf540, 0, 0), 90, 235},
+	{RTE_IPV6(0x8fe2, 0xbe32, 0xfc00, 0, 0, 0, 0, 0), 38, 105},
+	{RTE_IPV6(0x41a8, 0xe224, 0xc950, 0, 0, 0, 0, 0), 45, 138},
+	{RTE_IPV6(0x8828, 0x415a, 0x2f10, 0x8000, 0, 0, 0, 0), 49, 122},
+	{RTE_IPV6(0x5ebd, 0xe0c8, 0xaa0b, 0x4fac, 0, 0, 0, 0), 65, 193},
+	{RTE_IPV6(0xec29, 0xa9ea, 0x0e80, 0, 0, 0, 0, 0), 43, 231},
+	{RTE_IPV6(0x0128, 0x8c5f, 0x51ad, 0xfaf8, 0, 0, 0, 0), 64, 250},
+	{RTE_IPV6(0x53b0, 0x9270, 0x599c, 0x39dc, 0x7d30, 0x2c00, 0, 0), 86, 24},
+	{RTE_IPV6(0x4c7d, 0xe4f9, 0xf3a0, 0x6a00, 0, 0, 0, 0), 55, 191},
+	{RTE_IPV6(0x0acb, 0xcc31, 0xd473, 0x7d04, 0xef7a, 0x5122, 0x01c6, 0xd800), 117, 111},
+	{RTE_IPV6(0x4ad6, 0x172c, 0xd328, 0xa13d, 0xedbe, 0x9b3b, 0xad2a, 0), 111, 205},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 1, 133},
+	{RTE_IPV6(0x7f00, 0x823d, 0xd105, 0xe823, 0x232a, 0x7234, 0xa9ea, 0xbf00), 122, 122},
+	{RTE_IPV6(0xc96b, 0xd20d, 0xbb3e, 0x911c, 0x1fbd, 0x3800, 0, 0), 87, 227},
+	{RTE_IPV6(0x93ab, 0x3f91, 0x2fa0, 0, 0, 0, 0, 0), 46, 53},
+	{RTE_IPV6(0x5de8, 0x0a61, 0x15f3, 0xd587, 0xc800, 0, 0, 0), 72, 224},
+	{RTE_IPV6(0x9079, 0x2940, 0, 0, 0, 0, 0, 0), 26, 199},
+	{RTE_IPV6(0x7469, 0x8000, 0, 0, 0, 0, 0, 0), 17, 79},
+	{RTE_IPV6(0x8e95, 0x1800, 0, 0, 0, 0, 0, 0), 21, 19},
+	{RTE_IPV6(0x6100, 0xe49e, 0x32e9, 0xfbf9, 0x0042, 0xc5e2, 0, 0), 96, 211},
+	{RTE_IPV6(0x72e4, 0xc79b, 0xaf68, 0x1ad5, 0x42f9, 0x78da, 0xa4fc, 0xd400), 120, 6},
+	{RTE_IPV6(0xe0a6, 0x4cc8, 0x793c, 0x6e41, 0x3c5f, 0x89be, 0x5cda, 0xda00), 121, 143},
+	{RTE_IPV6(0x8bdb, 0x5ce0, 0, 0, 0, 0, 0, 0), 31, 135},
+	{RTE_IPV6(0xcbed, 0x40bd, 0x1c0d, 0x4bc5, 0xdbf3, 0xac03, 0x8e20, 0), 109, 21},
+	{RTE_IPV6(0xedba, 0x58fe, 0x7c00, 0, 0, 0, 0, 0), 38, 220},
+	{RTE_IPV6(0xb6e6, 0x5da2, 0x8119, 0x38c4, 0x7000, 0, 0, 0), 68, 151},
+	{RTE_IPV6(0xf52d, 0x45e2, 0x5ad4, 0xfe10, 0, 0, 0, 0), 60, 111},
+	{RTE_IPV6(0x6be5, 0xf000, 0, 0, 0, 0, 0, 0), 20, 63},
+	{RTE_IPV6(0x77d0, 0xb1eb, 0xdefc, 0xdb00, 0, 0, 0, 0), 57, 112},
+	{RTE_IPV6(0xb297, 0xdca2, 0x7880, 0, 0, 0, 0, 0), 41, 48},
+	{RTE_IPV6(0x6d1a, 0x5faa, 0xa697, 0x8953, 0xe252, 0x0572, 0xfdd2, 0x120c), 126, 100},
+	{RTE_IPV6(0x7e1b, 0xfc13, 0xdb81, 0x7930, 0, 0, 0, 0), 60, 156},
+	{RTE_IPV6(0xd3c3, 0x9891, 0x9a5d, 0xe4d7, 0x8765, 0x1c52, 0, 0), 95, 120},
+	{RTE_IPV6(0xfc6b, 0, 0, 0, 0, 0, 0, 0), 16, 5},
+	{RTE_IPV6(0xc000, 0, 0, 0, 0, 0, 0, 0), 4, 103},
+	{RTE_IPV6(0x4000, 0, 0, 0, 0, 0, 0, 0), 4, 84},
+	{RTE_IPV6(0xe1b3, 0x2b2b, 0xde91, 0xcdee, 0xa49e, 0x93e5, 0x3800, 0), 101, 24},
+	{RTE_IPV6(0xd07f, 0x9718, 0x4071, 0x2f55, 0xd14f, 0x9000, 0, 0), 86, 81},
+	{RTE_IPV6(0xb290, 0xcb68, 0, 0, 0, 0, 0, 0), 29, 96},
+	{RTE_IPV6(0x38e3, 0x8b04, 0x5657, 0xb401, 0xd7a7, 0xed9c, 0x6f40, 0x2f00), 121, 6},
+	{RTE_IPV6(0x504c, 0xcc77, 0xaca9, 0xfe51, 0x68a6, 0xdb2c, 0xada1, 0xd400), 119, 40},
+	{RTE_IPV6(0x818d, 0x8b22, 0xf165, 0xdf90, 0, 0, 0, 0), 62, 143},
+	{RTE_IPV6(0x5566, 0x8962, 0x4167, 0x368e, 0x9000, 0, 0, 0), 68, 69},
+	{RTE_IPV6(0x381f, 0x9f0d, 0xc98b, 0xa11f, 0x5989, 0x0400, 0, 0), 92, 48},
+	{RTE_IPV6(0xe5dd, 0x36d8, 0xdf1b, 0xc401, 0, 0, 0, 0), 64, 115},
+	{RTE_IPV6(0x0590, 0xb02b, 0xb4bb, 0x1431, 0x3b49, 0x6c22, 0x5320, 0xc000), 115, 130},
+	{RTE_IPV6(0x18d9, 0xcdc1, 0x4a7b, 0xa06a, 0x674a, 0xc800, 0, 0), 86, 57},
+	{RTE_IPV6(0xf700, 0, 0, 0, 0, 0, 0, 0), 8, 97},
+	{RTE_IPV6(0x0c80, 0, 0, 0, 0, 0, 0, 0), 9, 146},
+	{RTE_IPV6(0xa01c, 0xc977, 0x945d, 0xfb76, 0x1cb3, 0x7b34, 0x47e8, 0x3000), 117, 194},
+	{RTE_IPV6(0x987e, 0x1136, 0x6538, 0x8201, 0xcd29, 0xcf5a, 0x977b, 0x8000), 114, 129},
+	{RTE_IPV6(0x4da5, 0x1def, 0x5ff2, 0x2201, 0x0bcc, 0x87ef, 0x8000, 0), 97, 159},
+	{RTE_IPV6(0xb76c, 0x9276, 0x4abe, 0x078d, 0x095c, 0x0202, 0x08da, 0x7800), 117, 242},
+	{RTE_IPV6(0x2598, 0x1def, 0xf235, 0x388f, 0xdb16, 0x0e9e, 0x3100, 0), 104, 162},
+	{RTE_IPV6(0xc635, 0xf166, 0xf0f4, 0x61cb, 0x3e80, 0xd5d6, 0xdc00, 0), 102, 140},
+	{RTE_IPV6(0x9059, 0x302a, 0xf9e7, 0xbdb2, 0xe8c7, 0x1e3a, 0x3f39, 0), 113, 77},
+	{RTE_IPV6(0x44d4, 0xb17b, 0x2ce0, 0x13ac, 0x5957, 0xc000, 0, 0), 82, 121},
+	{RTE_IPV6(0xfc1d, 0xb3e0, 0x0479, 0xcd43, 0x9800, 0, 0, 0), 69, 102},
+	{RTE_IPV6(0x1c6e, 0xa400, 0, 0, 0, 0, 0, 0), 23, 28},
+	{RTE_IPV6(0x1858, 0xe701, 0x0447, 0x47f1, 0xfc0e, 0xc500, 0, 0), 89, 154},
+	{RTE_IPV6(0x3f83, 0x2b4c, 0x3a8c, 0xa34a, 0x9e50, 0, 0, 0), 76, 39},
+	{RTE_IPV6(0x381c, 0x9395, 0x625d, 0xd8d8, 0xcb9c, 0, 0, 0), 78, 163},
+	{RTE_IPV6(0x86a9, 0x0667, 0xa1f4, 0x8675, 0x1000, 0, 0, 0), 68, 42},
+	{RTE_IPV6(0x8ff7, 0x7dbe, 0x6a32, 0xcc62, 0xfa97, 0xa160, 0, 0), 92, 207},
+	{RTE_IPV6(0xeb00, 0, 0, 0, 0, 0, 0, 0), 8, 25},
+	{RTE_IPV6(0x2e48, 0, 0, 0, 0, 0, 0, 0), 18, 150},
+	{RTE_IPV6(0xab23, 0x8075, 0x4a1d, 0xc743, 0x6db0, 0, 0, 0), 76, 103},
+	{RTE_IPV6(0xdce9, 0xec70, 0x8788, 0xd72b, 0x2a00, 0, 0, 0), 71, 155},
+	{RTE_IPV6(0xe40b, 0x9075, 0xcec0, 0x7619, 0x8d4e, 0x0469, 0, 0), 96, 142},
+	{RTE_IPV6(0xc343, 0xc2e5, 0x0e35, 0x8107, 0x1ed0, 0x2664, 0xb63b, 0), 112, 2},
+	{RTE_IPV6(0x194c, 0, 0, 0, 0, 0, 0, 0), 19, 59},
+	{RTE_IPV6(0xa000, 0, 0, 0, 0, 0, 0, 0), 4, 112},
+	{RTE_IPV6(0x1acb, 0xd998, 0x10bb, 0, 0, 0, 0, 0), 48, 166},
+	{RTE_IPV6(0xfad5, 0x0eeb, 0x6eab, 0xae17, 0x6680, 0, 0, 0), 73, 62},
+	{RTE_IPV6(0xafe6, 0xa00d, 0xbb11, 0, 0, 0, 0, 0), 50, 176},
+	{RTE_IPV6(0x5c9b, 0x9c5d, 0xbf49, 0x1c52, 0xbb81, 0x3905, 0x1000, 0), 100, 6},
+	{RTE_IPV6(0x2dcb, 0x0308, 0, 0, 0, 0, 0, 0), 29, 26},
+	{RTE_IPV6(0x7800, 0, 0, 0, 0, 0, 0, 0), 5, 6},
+	{RTE_IPV6(0xd800, 0, 0, 0, 0, 0, 0, 0), 5, 13},
+	{RTE_IPV6(0x87d7, 0x0047, 0x1800, 0, 0, 0, 0, 0), 37, 41},
+	{RTE_IPV6(0xdd95, 0x0128, 0x7000, 0, 0, 0, 0, 0), 36, 135},
+	{RTE_IPV6(0x5f8f, 0xffc2, 0x029d, 0xbf71, 0x0ae5, 0xcc38, 0, 0), 93, 171},
+	{RTE_IPV6(0xcad4, 0x6000, 0, 0, 0, 0, 0, 0), 19, 20},
+	{RTE_IPV6(0x93cb, 0xee78, 0xc217, 0x193a, 0xd0b1, 0xa900, 0, 0), 89, 119},
+	{RTE_IPV6(0x89aa, 0x71fc, 0xd7c2, 0xe092, 0xe957, 0x56c0, 0x1a2e, 0), 112, 49},
+	{RTE_IPV6(0xe000, 0, 0, 0, 0, 0, 0, 0), 4, 141},
+	{RTE_IPV6(0xfa5a, 0xf1ae, 0xa348, 0, 0, 0, 0, 0), 47, 132},
+	{RTE_IPV6(0x42be, 0xca90, 0x7a56, 0x1667, 0x6ba4, 0x3936, 0xe480, 0), 105, 176},
+	{RTE_IPV6(0x4c40, 0, 0, 0, 0, 0, 0, 0), 12, 186},
+	{RTE_IPV6(0x78f6, 0x0134, 0xbba3, 0x4e69, 0xe000, 0, 0, 0), 67, 93},
+	{RTE_IPV6(0x89f2, 0x8847, 0x620a, 0x3561, 0xa055, 0x847f, 0xb9de, 0), 111, 242},
+	{RTE_IPV6(0xff85, 0xb500, 0, 0, 0, 0, 0, 0), 24, 163},
+	{RTE_IPV6(0x80b1, 0x5c9b, 0x5bc0, 0, 0, 0, 0, 0), 42, 184},
+	{RTE_IPV6(0x2d78, 0xbac0, 0xf0c7, 0xb25f, 0x2000, 0, 0, 0), 68, 188},
+	{RTE_IPV6(0x9762, 0x67fe, 0x5a06, 0x0a6d, 0x0e9e, 0x451d, 0x8ced, 0x28e8), 126, 193},
+	{RTE_IPV6(0x94a4, 0x5155, 0x4c0e, 0x5440, 0x59b0, 0, 0, 0), 78, 63},
+	{RTE_IPV6(0x91bb, 0xa588, 0x581e, 0x6bbf, 0xcd78, 0x77d8, 0x9e7b, 0x4000), 115, 160},
+	{RTE_IPV6(0x4e78, 0x1cf3, 0xd8b4, 0x5713, 0xfd10, 0x6e21, 0xe418, 0xe800), 117, 251},
+	{RTE_IPV6(0x4a06, 0xa6a6, 0xb79d, 0x6054, 0x9700, 0, 0, 0), 72, 228},
+	{RTE_IPV6(0x5960, 0x04dd, 0xd6fd, 0x3a31, 0x0900, 0, 0, 0), 72, 168},
+	{RTE_IPV6(0x6109, 0x4000, 0, 0, 0, 0, 0, 0), 18, 194},
+	{RTE_IPV6(0xd5d7, 0x2dc8, 0xaa78, 0, 0, 0, 0, 0), 47, 166},
+	{RTE_IPV6(0x050e, 0x5c00, 0x1cf5, 0x82ca, 0x2028, 0xcf4d, 0xa6aa, 0xf640), 122, 210},
+	{RTE_IPV6(0x4d2d, 0x2b47, 0xca00, 0x9d92, 0x3b5b, 0xe100, 0, 0), 89, 254},
+	{RTE_IPV6(0x65ae, 0x5ea8, 0xa2ab, 0x470c, 0x10e0, 0, 0, 0), 75, 49},
+	{RTE_IPV6(0x3a11, 0xbbc2, 0x5749, 0xd767, 0xb40c, 0x2842, 0, 0), 96, 95},
+	{RTE_IPV6(0xa05b, 0x4451, 0x8000, 0, 0, 0, 0, 0), 33, 193},
+	{RTE_IPV6(0x5e70, 0xf90d, 0xa7f5, 0x6540, 0, 0, 0, 0), 58, 155},
+	{RTE_IPV6(0xecc2, 0, 0, 0, 0, 0, 0, 0), 15, 133},
+	{RTE_IPV6(0xa8f3, 0x67dd, 0x7800, 0, 0, 0, 0, 0), 38, 10},
+	{RTE_IPV6(0x56c2, 0xdabc, 0x8000, 0, 0, 0, 0, 0), 33, 31},
+	{RTE_IPV6(0xe803, 0x8643, 0x3fc4, 0x560e, 0xaaf3, 0x4d86, 0xbb8c, 0x4812), 127, 98},
+	{RTE_IPV6(0x37fd, 0x13c9, 0xc747, 0xe5da, 0x3640, 0x0ca2, 0, 0), 96, 22},
+	{RTE_IPV6(0x8e22, 0x2000, 0, 0, 0, 0, 0, 0), 20, 214},
+	{RTE_IPV6(0xd510, 0xd032, 0x6421, 0xc000, 0, 0, 0, 0), 50, 217},
+	{RTE_IPV6(0x75ed, 0x84b9, 0xb8f6, 0x4f2a, 0x6762, 0xa2f3, 0x8000, 0), 98, 102},
+	{RTE_IPV6(0x7819, 0xd6de, 0x3d9d, 0xcb66, 0x0392, 0xc000, 0, 0), 83, 169},
+	{RTE_IPV6(0xde2e, 0xfe40, 0, 0, 0, 0, 0, 0), 27, 152},
+	{RTE_IPV6(0xfe46, 0x9eab, 0x0bf5, 0xdf61, 0x4611, 0x1bc0, 0xba00, 0), 103, 214},
+	{RTE_IPV6(0xc080, 0xe411, 0x4414, 0x2c1f, 0x3422, 0xd401, 0xe000, 0), 99, 178},
+	{RTE_IPV6(0xede5, 0xcb08, 0x79b0, 0, 0, 0, 0, 0), 45, 164},
+	{RTE_IPV6(0x0600, 0, 0, 0, 0, 0, 0, 0), 7, 15},
+	{RTE_IPV6(0x47c5, 0xfb7a, 0x8ae8, 0x0cf1, 0x74f0, 0, 0, 0), 76, 94},
+	{RTE_IPV6(0x12f1, 0x87d2, 0xe936, 0x79b9, 0x0400, 0, 0, 0), 70, 239},
+	{RTE_IPV6(0x2032, 0xd53f, 0x49d9, 0xb415, 0xbb80, 0, 0, 0), 73, 82},
+	{RTE_IPV6(0xcba6, 0xe949, 0x5cb6, 0xd400, 0, 0, 0, 0), 55, 54},
+	{RTE_IPV6(0x38a2, 0x7e04, 0x12c3, 0xc040, 0xa49c, 0x77c4, 0x4000, 0), 98, 47},
+	{RTE_IPV6(0x7857, 0x5188, 0xb4b3, 0x4494, 0xf326, 0x5000, 0, 0), 84, 214},
+	{RTE_IPV6(0x40f4, 0xc132, 0x3000, 0, 0, 0, 0, 0), 37, 215},
+	{RTE_IPV6(0x5ba8, 0xfd9e, 0x8353, 0x9fa3, 0x71a9, 0x7000, 0, 0), 84, 153},
+	{RTE_IPV6(0x9f67, 0x6684, 0x6f2e, 0x124d, 0x240f, 0x8921, 0xb11f, 0xf3c0), 122, 245},
+	{RTE_IPV6(0x7b00, 0, 0, 0, 0, 0, 0, 0), 8, 118},
+	{RTE_IPV6(0x4351, 0xe2be, 0x074f, 0x47fa, 0x9bf5, 0x2c51, 0xd7d5, 0xabe0), 123, 128},
+	{RTE_IPV6(0x6700, 0, 0, 0, 0, 0, 0, 0), 8, 7},
+	{RTE_IPV6(0xf62c, 0xa8c8, 0xc6ee, 0x34c4, 0x7d73, 0, 0, 0), 80, 152},
+	{RTE_IPV6(0xcd0e, 0xbafc, 0xefd5, 0x3b77, 0x6925, 0x8cd1, 0x04e7, 0), 114, 248},
+	{RTE_IPV6(0x465b, 0xfe6a, 0x5e47, 0xaa13, 0x9ef2, 0xc000, 0, 0), 85, 143},
+	{RTE_IPV6(0xfa56, 0xe9b8, 0, 0, 0, 0, 0, 0), 30, 159},
+	{RTE_IPV6(0x7ade, 0x0200, 0, 0, 0, 0, 0, 0), 24, 11},
+	{RTE_IPV6(0x1be0, 0xeb46, 0, 0, 0, 0, 0, 0), 31, 110},
+	{RTE_IPV6(0xef64, 0xe003, 0x2e7f, 0x96fb, 0xcc78, 0xe440, 0, 0), 97, 181},
+	{RTE_IPV6(0x9073, 0xb6ce, 0x920d, 0x156f, 0x2546, 0xb381, 0xad52, 0x5d80), 121, 4},
+	{RTE_IPV6(0x49be, 0x39f3, 0x3133, 0x0fd1, 0, 0, 0, 0), 67, 101},
+	{RTE_IPV6(0x1240, 0, 0, 0, 0, 0, 0, 0), 11, 38},
+	{RTE_IPV6(0x1725, 0xecb1, 0xba07, 0xd187, 0x722c, 0, 0, 0), 78, 57},
+	{RTE_IPV6(0xc830, 0, 0, 0, 0, 0, 0, 0), 17, 142},
+	{RTE_IPV6(0xb5ff, 0x9900, 0, 0, 0, 0, 0, 0), 24, 184},
+	{RTE_IPV6(0x87a8, 0x0680, 0, 0, 0, 0, 0, 0), 27, 91},
+	{RTE_IPV6(0xc8e0, 0x21f5, 0x7800, 0, 0, 0, 0, 0), 41, 224},
+	{RTE_IPV6(0x466f, 0x0a3e, 0xc8e0, 0x26cc, 0x0ea4, 0, 0, 0), 78, 114},
+	{RTE_IPV6(0x9e85, 0xfc12, 0xf20c, 0x103c, 0x0534, 0xfbb3, 0x26eb, 0x0c00), 118, 184},
+	{RTE_IPV6(0x0217, 0x7430, 0, 0, 0, 0, 0, 0), 28, 215},
+	{RTE_IPV6(0x2119, 0xaa4a, 0xd786, 0x97b5, 0xafe8, 0x149b, 0xbdf2, 0x0d00), 120, 167},
+	{RTE_IPV6(0xa0ba, 0xdab7, 0xa754, 0x3b98, 0x0d89, 0x5080, 0, 0), 89, 233},
+	{RTE_IPV6(0x208d, 0xc400, 0, 0, 0, 0, 0, 0), 29, 101},
+	{RTE_IPV6(0xcf18, 0xcae2, 0xbf88, 0x4e7c, 0xa000, 0, 0, 0), 67, 139},
+	{RTE_IPV6(0xd2ad, 0xac1b, 0xc539, 0x7292, 0xa920, 0, 0, 0), 79, 32},
+	{RTE_IPV6(0x5f71, 0x0c7b, 0, 0, 0, 0, 0, 0), 32, 57},
+	{RTE_IPV6(0x816c, 0xba1c, 0x13e5, 0x6086, 0xc7fe, 0xc740, 0, 0), 91, 151},
+	{RTE_IPV6(0x67e2, 0x267b, 0x23c7, 0, 0, 0, 0, 0), 49, 0},
+	{RTE_IPV6(0x2975, 0x2b23, 0xd073, 0x4940, 0, 0, 0, 0), 63, 227},
+	{RTE_IPV6(0x2adc, 0x3d22, 0xc7b7, 0x2a10, 0xdf87, 0x0087, 0xd596, 0x6400), 118, 124},
+	{RTE_IPV6(0xa5e3, 0x60f3, 0x70ab, 0x756a, 0x3225, 0x523c, 0x5000, 0), 104, 228},
+	{RTE_IPV6(0x9e3c, 0x6f60, 0, 0, 0, 0, 0, 0), 27, 64},
+	{RTE_IPV6(0x7c6c, 0x5800, 0, 0, 0, 0, 0, 0), 25, 179},
+	{RTE_IPV6(0xe844, 0x849f, 0x9c67, 0x5fbe, 0x4c00, 0, 0, 0), 70, 107},
+	{RTE_IPV6(0x464d, 0xf0d1, 0x483f, 0x3f2d, 0x7d4f, 0x4d29, 0x0d00, 0), 104, 206},
+	{RTE_IPV6(0x92fe, 0x0705, 0x44f0, 0x43ed, 0x7000, 0, 0, 0), 68, 95},
+	{RTE_IPV6(0xa2df, 0x751b, 0x029c, 0x5eaa, 0x9d72, 0xa232, 0, 0), 96, 219},
+	{RTE_IPV6(0xa13e, 0xbf44, 0xef49, 0x6425, 0xa8fe, 0x8bca, 0xfc41, 0x4a00), 119, 138},
+	{RTE_IPV6(0xf87a, 0x7351, 0x0f9e, 0x8800, 0, 0, 0, 0), 53, 84},
+	{RTE_IPV6(0x0880, 0, 0, 0, 0, 0, 0, 0), 11, 161},
+	{RTE_IPV6(0x8e60, 0x6985, 0xfb39, 0x8000, 0, 0, 0, 0), 52, 25},
+	{RTE_IPV6(0x8ac4, 0x8b83, 0xe95d, 0x41f2, 0x56a9, 0x0748, 0x5280, 0), 107, 113},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 2, 46},
+	{RTE_IPV6(0xaf97, 0x4bee, 0x1a0c, 0x64ba, 0, 0, 0, 0), 63, 72},
+	{RTE_IPV6(0x52cd, 0xd3b0, 0xaa4f, 0x3999, 0xa1da, 0x2030, 0, 0), 93, 230},
+	{RTE_IPV6(0xe37b, 0xe84a, 0xecca, 0xd379, 0xc808, 0x3bbd, 0x51db, 0x9000), 117, 142},
+	{RTE_IPV6(0xcdc4, 0x595a, 0x6780, 0, 0, 0, 0, 0), 41, 134},
+	{RTE_IPV6(0x3f91, 0x177f, 0x66d8, 0x3124, 0xa8a4, 0x3b85, 0x1292, 0), 112, 100},
+	{RTE_IPV6(0xd548, 0x9a10, 0xe6ec, 0xdacb, 0xdf33, 0x1ffb, 0x6740, 0), 109, 45},
+	{RTE_IPV6(0x7e94, 0xe898, 0, 0, 0, 0, 0, 0), 30, 219},
+	{RTE_IPV6(0xa048, 0, 0, 0, 0, 0, 0, 0), 16, 52},
+	{RTE_IPV6(0x8926, 0x9214, 0x63bc, 0x537b, 0x9f9f, 0x4000, 0, 0), 83, 240},
+	{RTE_IPV6(0x7be4, 0x242c, 0xf21d, 0x33e4, 0x8c3c, 0xed00, 0, 0), 90, 13},
+	{RTE_IPV6(0xa3a9, 0x1959, 0xbe72, 0xa59e, 0x8cd2, 0xc000, 0, 0), 84, 191},
+	{RTE_IPV6(0xe126, 0x4659, 0xdaec, 0x3c05, 0x45a3, 0xf832, 0xa340, 0), 106, 95},
+	{RTE_IPV6(0x5b5e, 0x2430, 0, 0, 0, 0, 0, 0), 28, 65},
+	{RTE_IPV6(0xd1ee, 0x6e00, 0x0298, 0, 0, 0, 0, 0), 45, 195},
+	{RTE_IPV6(0x3911, 0xe0a4, 0x455f, 0x8aac, 0x6f37, 0xefa7, 0xa000, 0), 103, 21},
+	{RTE_IPV6(0x7000, 0, 0, 0, 0, 0, 0, 0), 4, 114},
+	{RTE_IPV6(0x6660, 0xdf1c, 0, 0, 0, 0, 0, 0), 31, 92},
+	{RTE_IPV6(0x89cc, 0x964b, 0xc100, 0, 0, 0, 0, 0), 42, 237},
+	{RTE_IPV6(0x8838, 0xfcf0, 0x5530, 0xf8e7, 0x1131, 0x2fee, 0x0fe9, 0x9fb8), 125, 172},
+	{RTE_IPV6(0x391f, 0x847b, 0xeaff, 0x2552, 0xa7cc, 0x259e, 0x8000, 0), 98, 116},
+	{RTE_IPV6(0x37c6, 0x8bdb, 0xa19c, 0x8c00, 0, 0, 0, 0), 55, 54},
+	{RTE_IPV6(0x2c00, 0, 0, 0, 0, 0, 0, 0), 8, 203},
+	{RTE_IPV6(0x3526, 0, 0, 0, 0, 0, 0, 0), 16, 74},
+	{RTE_IPV6(0xe33e, 0x6bec, 0x769c, 0x3c22, 0x1fb3, 0x4cdd, 0, 0), 96, 220},
+	{RTE_IPV6(0x6928, 0xf0d8, 0x5b3d, 0x1380, 0xe000, 0, 0, 0), 67, 219},
+	{RTE_IPV6(0x6000, 0, 0, 0, 0, 0, 0, 0), 3, 179},
+	{RTE_IPV6(0x768e, 0xfbf9, 0x8069, 0x7110, 0, 0, 0, 0), 61, 194},
+	{RTE_IPV6(0x6546, 0xc4ee, 0, 0, 0, 0, 0, 0), 32, 187},
+	{RTE_IPV6(0xf5ad, 0xa5b1, 0xc8a1, 0x4000, 0, 0, 0, 0), 50, 79},
+	{RTE_IPV6(0x00c6, 0, 0, 0, 0, 0, 0, 0), 19, 87},
+	{RTE_IPV6(0x5c00, 0, 0, 0, 0, 0, 0, 0), 8, 126},
+	{RTE_IPV6(0x7d00, 0, 0, 0, 0, 0, 0, 0), 11, 106},
+	{RTE_IPV6(0x383b, 0x2352, 0x6548, 0, 0, 0, 0, 0), 50, 96},
+	{RTE_IPV6(0xb848, 0x4dfb, 0x08a6, 0, 0, 0, 0, 0), 47, 45},
+	{RTE_IPV6(0x8f4a, 0x84cd, 0xdaf7, 0x1ea0, 0x91c7, 0x8a0c, 0x59dc, 0), 110, 8},
+	{RTE_IPV6(0x1eb2, 0x6fe1, 0x494f, 0xad34, 0, 0, 0, 0), 62, 226},
+	{RTE_IPV6(0xe030, 0x9ae7, 0x2000, 0, 0, 0, 0, 0), 36, 222},
+	{RTE_IPV6(0x7b90, 0xaa8f, 0x55a9, 0x82f5, 0xd600, 0, 0, 0), 71, 218},
+	{RTE_IPV6(0xa6e0, 0xd464, 0x9537, 0x23d2, 0xf66c, 0x29f5, 0x7fae, 0x8000), 116, 59},
+	{RTE_IPV6(0x4b00, 0, 0, 0, 0, 0, 0, 0), 8, 80},
+	{RTE_IPV6(0xc580, 0xbe57, 0x2f35, 0x5c40, 0, 0, 0, 0), 58, 177},
+	{RTE_IPV6(0xf90a, 0x4cd9, 0xe114, 0x7ccd, 0x2c9f, 0xbe08, 0, 0), 98, 44},
+	{RTE_IPV6(0xb4e2, 0x00a7, 0x89e8, 0xae78, 0x715f, 0x16b8, 0, 0), 93, 206},
+	{RTE_IPV6(0x7b99, 0x66c0, 0, 0, 0, 0, 0, 0), 27, 64},
+	{RTE_IPV6(0x0590, 0xce9e, 0xefbd, 0xab78, 0x452e, 0x80ed, 0, 0), 96, 236},
+	{RTE_IPV6(0x9feb, 0x3c00, 0, 0, 0, 0, 0, 0), 22, 101},
+	{RTE_IPV6(0x2ac2, 0x9600, 0, 0, 0, 0, 0, 0), 26, 49},
+	{RTE_IPV6(0xcd60, 0, 0, 0, 0, 0, 0, 0), 11, 179},
+	{RTE_IPV6(0x1341, 0x8d14, 0x7f4d, 0x46cd, 0x9773, 0x9d17, 0x7680, 0), 109, 112},
+	{RTE_IPV6(0x600b, 0xd628, 0xf5fb, 0x3d40, 0x80f1, 0xb7b7, 0, 0), 96, 31},
+	{RTE_IPV6(0x7804, 0xeb70, 0x2280, 0, 0, 0, 0, 0), 41, 111},
+	{RTE_IPV6(0x6e7f, 0xcf4c, 0x6494, 0x82ce, 0xf902, 0x6800, 0, 0), 86, 65},
+	{RTE_IPV6(0xe2be, 0xbff9, 0xad60, 0x7fc8, 0x3e14, 0, 0, 0), 78, 222},
+	{RTE_IPV6(0x5958, 0xb60e, 0x4e7a, 0xd5c0, 0, 0, 0, 0), 58, 4},
+	{RTE_IPV6(0xa75e, 0xa3e3, 0x1c6f, 0x7567, 0xe000, 0, 0, 0), 67, 67},
+	{RTE_IPV6(0x39dc, 0x3574, 0xf3b8, 0xf286, 0x1046, 0x533d, 0xa180, 0), 109, 197},
+	{RTE_IPV6(0x3feb, 0x1400, 0, 0, 0, 0, 0, 0), 22, 121},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 1, 167},
+	{RTE_IPV6(0x0f9f, 0x2aa7, 0x4c00, 0, 0, 0, 0, 0), 38, 140},
+	{RTE_IPV6(0xd8fc, 0x7128, 0xef2e, 0xac30, 0x67fa, 0x52b3, 0x8840, 0), 106, 193},
+	{RTE_IPV6(0x9e93, 0x102c, 0x7c38, 0x2c30, 0x8a40, 0xa900, 0, 0), 90, 47},
+	{RTE_IPV6(0xeeee, 0x6000, 0, 0, 0, 0, 0, 0), 21, 187},
+	{RTE_IPV6(0x3f9f, 0xb1a2, 0x6ad4, 0xac00, 0, 0, 0, 0), 56, 102},
+	{RTE_IPV6(0x3b28, 0xfcb9, 0xbbd8, 0, 0, 0, 0, 0), 46, 237},
+	{RTE_IPV6(0x02da, 0x0b44, 0xadc4, 0x10df, 0x0212, 0x7ad7, 0x9a00, 0), 103, 237},
+	{RTE_IPV6(0x0309, 0xce49, 0x6cc4, 0xb777, 0x8da2, 0x0ab4, 0x7320, 0), 107, 115},
+	{RTE_IPV6(0x11e3, 0xd092, 0x3fc9, 0x49ef, 0x1d4f, 0x5000, 0, 0), 84, 217},
+	{RTE_IPV6(0x73b4, 0xb0f1, 0x34d1, 0x0640, 0xbd4c, 0, 0, 0), 79, 21},
+	{RTE_IPV6(0xbf58, 0x62f5, 0x5b2e, 0x89fe, 0xaa50, 0x0b37, 0xd41c, 0x8000), 113, 3},
+	{RTE_IPV6(0x618d, 0xabaf, 0x16e9, 0, 0, 0, 0, 0), 48, 62},
+	{RTE_IPV6(0x20cc, 0x66bf, 0xa4f2, 0, 0, 0, 0, 0), 47, 80},
+	{RTE_IPV6(0x1d85, 0xd2fc, 0x7c42, 0xa000, 0, 0, 0, 0), 51, 184},
+	{RTE_IPV6(0xcfb3, 0x3690, 0x7443, 0x1d40, 0x0dc7, 0, 0, 0), 80, 197},
+	{RTE_IPV6(0x81d8, 0, 0, 0, 0, 0, 0, 0), 13, 63},
+	{RTE_IPV6(0x3298, 0xf98f, 0xaeea, 0xf030, 0x9eff, 0x5069, 0, 0), 99, 62},
+	{RTE_IPV6(0x69d0, 0x5fda, 0x2c0b, 0x5786, 0x6d12, 0x8a42, 0x1145, 0x8000), 114, 231},
+	{RTE_IPV6(0x974f, 0x9edc, 0x7a65, 0xd2a4, 0x4000, 0, 0, 0), 67, 158},
+	{RTE_IPV6(0xec61, 0x579b, 0xfe89, 0x7ad0, 0xa8c9, 0xc276, 0xe000, 0), 101, 118},
+	{RTE_IPV6(0x0ee5, 0xc1f8, 0, 0, 0, 0, 0, 0), 30, 237},
+	{RTE_IPV6(0x2e9a, 0x3250, 0x5c93, 0x9e56, 0x0170, 0, 0, 0), 79, 15},
+	{RTE_IPV6(0x5883, 0x1554, 0x3e56, 0x076e, 0x8efb, 0xf26e, 0xc2af, 0xf700), 122, 84},
+	{RTE_IPV6(0xe5d8, 0x6f5c, 0xad20, 0x3f46, 0x2454, 0x064a, 0x88a6, 0x2600), 119, 205},
+	{RTE_IPV6(0x7993, 0xd8f5, 0x25bd, 0x923f, 0x914a, 0x8000, 0, 0), 82, 220},
+	{RTE_IPV6(0x2c1a, 0xfe0b, 0xb500, 0, 0, 0, 0, 0), 40, 42},
+	{RTE_IPV6(0xd172, 0x61f9, 0xe39f, 0xe000, 0, 0, 0, 0), 51, 144},
+	{RTE_IPV6(0xb8f4, 0x2b75, 0x3800, 0, 0, 0, 0, 0), 37, 74},
+	{RTE_IPV6(0x3c51, 0x8000, 0, 0, 0, 0, 0, 0), 19, 89},
+	{RTE_IPV6(0x1228, 0x1571, 0xe25b, 0xc358, 0xa113, 0x8e00, 0, 0), 88, 77},
+	{RTE_IPV6(0x3900, 0xd49e, 0x3833, 0x6cc6, 0x3b05, 0x89c4, 0, 0), 94, 2},
+	{RTE_IPV6(0xa8fc, 0, 0, 0, 0, 0, 0, 0), 14, 75},
+	{RTE_IPV6(0x40b5, 0xfe67, 0x01e6, 0x75c7, 0x8000, 0, 0, 0), 65, 18},
+	{RTE_IPV6(0xd430, 0xd67f, 0x4eb0, 0, 0, 0, 0, 0), 46, 246},
+	{RTE_IPV6(0x9bb9, 0xeca3, 0xcc31, 0x8178, 0xb72f, 0x0af3, 0x415c, 0xc000), 114, 10},
+	{RTE_IPV6(0x5ec8, 0, 0, 0, 0, 0, 0, 0), 14, 207},
+	{RTE_IPV6(0x13d2, 0x8871, 0x494f, 0x84c4, 0xe000, 0, 0, 0), 68, 41},
+	{RTE_IPV6(0x18cb, 0xf6f2, 0xf1df, 0x96ed, 0xd5ca, 0x0b80, 0, 0), 89, 102},
+	{RTE_IPV6(0x733b, 0xabdd, 0xacb5, 0xaa43, 0x73cd, 0x2c6b, 0xa243, 0x3800), 118, 118},
+	{RTE_IPV6(0xfa80, 0, 0, 0, 0, 0, 0, 0), 10, 146},
+	{RTE_IPV6(0xcbf0, 0x1c9e, 0xb60c, 0x56b6, 0x8e2f, 0x8f39, 0xef00, 0), 104, 122},
+	{RTE_IPV6(0xc4da, 0x6d34, 0x0200, 0x4099, 0x22fa, 0xf0b9, 0x7500, 0), 107, 6},
+	{RTE_IPV6(0x8983, 0xbf28, 0x48d1, 0x4a40, 0, 0, 0, 0), 58, 18},
+	{RTE_IPV6(0xec7e, 0xa725, 0xb914, 0x22cf, 0x4c00, 0, 0, 0), 70, 83},
+	{RTE_IPV6(0x81c0, 0xf589, 0xfb34, 0x4b44, 0x5170, 0x9285, 0x4000, 0), 99, 90},
+	{RTE_IPV6(0x071f, 0x9413, 0, 0, 0, 0, 0, 0), 32, 140},
+	{RTE_IPV6(0x7000, 0, 0, 0, 0, 0, 0, 0), 4, 242},
+	{RTE_IPV6(0xa732, 0xcab3, 0x4a92, 0xc000, 0, 0, 0, 0), 50, 31},
+	{RTE_IPV6(0x2cbc, 0xbafa, 0xe547, 0x1c76, 0x23fd, 0xf5bf, 0xc712, 0), 111, 9},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 1, 230},
+	{RTE_IPV6(0x9ca3, 0xd7af, 0x4750, 0, 0, 0, 0, 0), 47, 50},
+	{RTE_IPV6(0x4318, 0x97c6, 0xf280, 0, 0, 0, 0, 0), 41, 34},
+	{RTE_IPV6(0x866b, 0x8000, 0, 0, 0, 0, 0, 0), 17, 11},
+	{RTE_IPV6(0x2300, 0, 0, 0, 0, 0, 0, 0), 8, 71},
+	{RTE_IPV6(0x2ec4, 0x5400, 0, 0, 0, 0, 0, 0), 22, 146},
+	{RTE_IPV6(0x52ac, 0x081a, 0x9a22, 0x7dbc, 0x0595, 0x9f2c, 0x4ede, 0xecb0), 124, 249},
+	{RTE_IPV6(0x4e9d, 0x4f46, 0xfc00, 0, 0, 0, 0, 0), 39, 143},
+	{RTE_IPV6(0xe705, 0xd2f7, 0xc605, 0x9dbf, 0xcee1, 0x958e, 0xcf28, 0), 110, 17},
+	{RTE_IPV6(0x26fe, 0xebc7, 0xbf3c, 0x2b9f, 0xbef3, 0xcbb9, 0xb8da, 0x8400), 119, 60},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 1, 162},
+	{RTE_IPV6(0x5ff0, 0, 0, 0, 0, 0, 0, 0), 12, 5},
+	{RTE_IPV6(0x1180, 0xf4b2, 0xa04e, 0x535c, 0, 0, 0, 0), 62, 139},
+	{RTE_IPV6(0x1266, 0x3efb, 0x2c00, 0, 0, 0, 0, 0), 39, 8},
+	{RTE_IPV6(0x1e4b, 0x6c28, 0xe7a6, 0xe9dc, 0xa3b0, 0xfcd2, 0x3c1e, 0x8000), 114, 246},
+	{RTE_IPV6(0x1203, 0xcf40, 0x1980, 0, 0, 0, 0, 0), 42, 171},
+	{RTE_IPV6(0x3453, 0xeb3d, 0xa4ec, 0x53ad, 0x8f69, 0x0e00, 0, 0), 88, 206},
+	{RTE_IPV6(0xa6af, 0xbad0, 0, 0, 0, 0, 0, 0), 28, 163},
+	{RTE_IPV6(0xdd9a, 0x5262, 0x297e, 0x5534, 0, 0, 0, 0), 62, 166},
+	{RTE_IPV6(0x5e54, 0xb678, 0xcce8, 0x4000, 0, 0, 0, 0), 51, 128},
+	{RTE_IPV6(0x1bae, 0xe3e4, 0, 0, 0, 0, 0, 0), 31, 59},
+	{RTE_IPV6(0xda0c, 0x049c, 0, 0, 0, 0, 0, 0), 32, 179},
+	{RTE_IPV6(0x0905, 0xbec3, 0x3cd8, 0x5096, 0x8075, 0x5680, 0x8070, 0x62d0), 124, 87},
+	{RTE_IPV6(0x07e2, 0x6870, 0xd409, 0xac7c, 0xd179, 0xaae5, 0x2cb2, 0x8000), 114, 29},
+	{RTE_IPV6(0x2f47, 0xae4c, 0x3453, 0x1712, 0x6a30, 0x3820, 0, 0), 91, 184},
+	{RTE_IPV6(0x33a8, 0, 0, 0, 0, 0, 0, 0), 14, 45},
+	{RTE_IPV6(0x1cb6, 0xa77c, 0x1c16, 0x5000, 0, 0, 0, 0), 55, 144},
+	{RTE_IPV6(0x223d, 0x0e33, 0xfd11, 0x13aa, 0x31ce, 0xbccf, 0xf7a7, 0xc000), 114, 119},
+	{RTE_IPV6(0x02eb, 0x120e, 0xc342, 0xed1e, 0, 0, 0, 0), 64, 113},
+	{RTE_IPV6(0x33b6, 0x8e85, 0x7f60, 0x9f84, 0x63a1, 0x4000, 0, 0), 82, 50},
+	{RTE_IPV6(0xaa91, 0xe67b, 0xd7bd, 0x4900, 0, 0, 0, 0), 56, 207},
+	{RTE_IPV6(0x97a6, 0x2000, 0, 0, 0, 0, 0, 0), 21, 3},
+	{RTE_IPV6(0x108d, 0xc481, 0x84cf, 0x0700, 0, 0, 0, 0), 58, 13},
+	{RTE_IPV6(0xcd19, 0xb8bf, 0xc9ce, 0x6de0, 0, 0, 0, 0), 59, 42},
+	{RTE_IPV6(0x3072, 0x2167, 0xf7ff, 0x4000, 0, 0, 0, 0), 50, 31},
+	{RTE_IPV6(0xb39c, 0x7792, 0x7d15, 0x2a92, 0xedd5, 0xbf84, 0, 0), 94, 30},
+	{RTE_IPV6(0xb381, 0xba90, 0, 0, 0, 0, 0, 0), 29, 94},
+	{RTE_IPV6(0x11b3, 0xd9bc, 0x80d4, 0x0404, 0x9800, 0, 0, 0), 71, 190},
+	{RTE_IPV6(0x843f, 0x4a59, 0xd140, 0x3fc0, 0, 0, 0, 0), 59, 238},
+	{RTE_IPV6(0x1032, 0xf858, 0, 0, 0, 0, 0, 0), 30, 20},
+	{RTE_IPV6(0xbd60, 0x3a35, 0xbfeb, 0x4000, 0, 0, 0, 0), 51, 84},
+	{RTE_IPV6(0x6f62, 0x0641, 0x23c0, 0, 0, 0, 0, 0), 42, 108},
+	{RTE_IPV6(0x76df, 0x53dc, 0x6e7a, 0x1770, 0xb99b, 0x4900, 0, 0), 89, 136},
+	{RTE_IPV6(0xadbf, 0x96c5, 0xcc23, 0xa94f, 0x1fd6, 0xfbf0, 0, 0), 93, 196},
+	{RTE_IPV6(0x1a4c, 0x8130, 0, 0, 0, 0, 0, 0), 28, 67},
+	{RTE_IPV6(0xe760, 0, 0, 0, 0, 0, 0, 0), 12, 104},
+	{RTE_IPV6(0x5dac, 0xdffc, 0xcb00, 0xce00, 0, 0, 0, 0), 55, 15},
+	{RTE_IPV6(0x358e, 0xcb7c, 0x6833, 0xf10c, 0xa111, 0x65f5, 0x786e, 0xc0c7), 128, 237},
+	{RTE_IPV6(0x094d, 0x78c5, 0xc10a, 0xedae, 0xe902, 0xa50b, 0xe52f, 0x9000), 116, 224},
+	{RTE_IPV6(0x63a1, 0xbd58, 0x3000, 0, 0, 0, 0, 0), 36, 179},
+	{RTE_IPV6(0x1208, 0x4c42, 0x02b9, 0xce84, 0xe000, 0, 0, 0), 67, 84},
+	{RTE_IPV6(0xa935, 0x2000, 0, 0, 0, 0, 0, 0), 22, 65},
+	{RTE_IPV6(0x8800, 0, 0, 0, 0, 0, 0, 0), 5, 178},
+	{RTE_IPV6(0x83a2, 0x907c, 0x0c62, 0xf200, 0, 0, 0, 0), 55, 154},
+	{RTE_IPV6(0x4b32, 0x81c0, 0, 0, 0, 0, 0, 0), 27, 106},
+	{RTE_IPV6(0xd4b7, 0x28e1, 0x9888, 0xae5b, 0, 0, 0, 0), 67, 125},
+	{RTE_IPV6(0x9e00, 0, 0, 0, 0, 0, 0, 0), 9, 118},
+	{RTE_IPV6(0x0730, 0x8495, 0xa9d4, 0xc689, 0xca00, 0, 0, 0), 73, 52},
+	{RTE_IPV6(0xadc3, 0x81a3, 0x8df9, 0x2840, 0, 0, 0, 0), 58, 173},
+	{RTE_IPV6(0x6d4f, 0x4bdb, 0xcdb6, 0x16f5, 0xdf11, 0x924e, 0x6d77, 0x8000), 113, 8},
+	{RTE_IPV6(0xaec3, 0x18b6, 0xd7c6, 0xd656, 0x2280, 0, 0, 0), 74, 211},
+	{RTE_IPV6(0x1628, 0x336d, 0x465b, 0x9838, 0, 0, 0, 0), 61, 253},
+	{RTE_IPV6(0xa973, 0xf67e, 0x4176, 0xdbc0, 0, 0, 0, 0), 59, 47},
+	{RTE_IPV6(0x9a25, 0x467c, 0x6b7b, 0xe8f1, 0xa48e, 0x47e2, 0xb67e, 0), 112, 73},
+	{RTE_IPV6(0x066c, 0, 0, 0, 0, 0, 0, 0), 19, 192},
+	{RTE_IPV6(0xd8a7, 0x9e9e, 0xde13, 0x601c, 0x2806, 0x460c, 0x931b, 0x55f0), 128, 55},
+	{RTE_IPV6(0x48de, 0x3445, 0x45ce, 0xa36a, 0xebce, 0x5080, 0, 0), 94, 147},
+	{RTE_IPV6(0x9670, 0x6a38, 0x0ff3, 0x9a61, 0x866e, 0xa014, 0xb790, 0xea08), 125, 86},
+	{RTE_IPV6(0x3aba, 0x6a3a, 0x7cab, 0x3555, 0x2164, 0x4000, 0, 0), 82, 16},
+	{RTE_IPV6(0x07c3, 0x161f, 0x3ed9, 0xd12e, 0x5a31, 0xbd32, 0xa87e, 0), 111, 167},
+	{RTE_IPV6(0x5c2c, 0x9fc6, 0xb95e, 0xe7b1, 0x4000, 0, 0, 0), 67, 148},
+	{RTE_IPV6(0xa96c, 0xbea2, 0x1727, 0x8c00, 0, 0, 0, 0), 54, 66},
+	{RTE_IPV6(0xa105, 0x030b, 0x9e9d, 0xa6d4, 0xf616, 0x8c65, 0x5c00, 0), 104, 70},
+	{RTE_IPV6(0x47bf, 0, 0, 0, 0, 0, 0, 0), 16, 166},
+	{RTE_IPV6(0x3088, 0xc291, 0x3960, 0, 0, 0, 0, 0), 44, 109},
+	{RTE_IPV6(0x9000, 0, 0, 0, 0, 0, 0, 0), 5, 226},
+	{RTE_IPV6(0xdfd1, 0x0a39, 0, 0, 0, 0, 0, 0), 32, 8},
+	{RTE_IPV6(0x9a4f, 0xaa09, 0x2b8b, 0xf9b0, 0xba48, 0xd800, 0, 0), 85, 218},
+	{RTE_IPV6(0x0108, 0x7bcd, 0xa786, 0x8066, 0x0a48, 0, 0, 0), 78, 54},
+	{RTE_IPV6(0x1f69, 0x304d, 0x67bb, 0x6343, 0x6000, 0, 0, 0), 67, 48},
+	{RTE_IPV6(0x0e49, 0x364c, 0xe823, 0x2000, 0, 0, 0, 0), 51, 244},
+	{RTE_IPV6(0x0e6d, 0xfbbe, 0x24fd, 0x6378, 0x5e40, 0, 0, 0), 74, 50},
+	{RTE_IPV6(0x7aaa, 0x0986, 0x7c5b, 0x1800, 0, 0, 0, 0), 54, 173},
+	{RTE_IPV6(0xf60a, 0x5558, 0x52d9, 0x5f38, 0xd8cb, 0xa000, 0, 0), 84, 245},
+	{RTE_IPV6(0x4d64, 0x72cf, 0x96b1, 0x4586, 0x4a83, 0x9375, 0xb140, 0xd280), 121, 54},
+	{RTE_IPV6(0xab7b, 0x168a, 0x84e5, 0xfa51, 0xbae3, 0x921b, 0xaacd, 0x8000), 113, 86},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 2, 115},
+	{RTE_IPV6(0x0c23, 0x3200, 0, 0, 0, 0, 0, 0), 24, 144},
+	{RTE_IPV6(0xff7c, 0xb3a5, 0xa9fa, 0x42ab, 0xdf7d, 0xf700, 0, 0), 89, 171},
+	{RTE_IPV6(0xf4eb, 0xd30a, 0xfbff, 0xce06, 0xc60c, 0x3288, 0, 0), 93, 231},
+	{RTE_IPV6(0xdd4d, 0xed29, 0x3221, 0x6718, 0x197f, 0xd000, 0, 0), 88, 34},
+	{RTE_IPV6(0xd845, 0x2f35, 0x7518, 0x3800, 0, 0, 0, 0), 53, 225},
+	{RTE_IPV6(0xb457, 0x19ec, 0x3800, 0, 0, 0, 0, 0), 38, 174},
+	{RTE_IPV6(0x6e20, 0x1822, 0x7485, 0xf580, 0x7b5f, 0x7d7a, 0x6481, 0x8000), 113, 37},
+	{RTE_IPV6(0x1b75, 0xb370, 0x8589, 0x6ec1, 0xf6c9, 0xdb41, 0x38ea, 0x6a80), 121, 39},
+	{RTE_IPV6(0xba75, 0xfc00, 0, 0, 0, 0, 0, 0), 23, 59},
+	{RTE_IPV6(0xf377, 0x3610, 0x8000, 0, 0, 0, 0, 0), 34, 96},
+	{RTE_IPV6(0xa000, 0, 0, 0, 0, 0, 0, 0), 3, 147},
+	{RTE_IPV6(0x4e30, 0x75c8, 0xf576, 0x73f0, 0xaa7d, 0x5467, 0x21a8, 0), 110, 56},
+	{RTE_IPV6(0xc9fd, 0xb8fe, 0x8f51, 0x5f2a, 0xf393, 0x6091, 0x171a, 0), 111, 234},
+	{RTE_IPV6(0x29d7, 0x5488, 0xea00, 0, 0, 0, 0, 0), 46, 199},
+	{RTE_IPV6(0x5bf4, 0x89b8, 0xe75f, 0x870a, 0xb800, 0, 0, 0), 69, 191},
+	{RTE_IPV6(0x711f, 0xb5f5, 0x1500, 0, 0, 0, 0, 0), 40, 235},
+	{RTE_IPV6(0xb5d8, 0xc000, 0, 0, 0, 0, 0, 0), 20, 45},
+	{RTE_IPV6(0x571a, 0x77e5, 0x61ff, 0x092b, 0x2000, 0, 0, 0), 67, 164},
+	{RTE_IPV6(0xcd70, 0x43a3, 0xc494, 0x0569, 0x088a, 0x9003, 0xabd5, 0x9f80), 121, 130},
+	{RTE_IPV6(0x881b, 0xc000, 0, 0, 0, 0, 0, 0), 18, 166},
+	{RTE_IPV6(0x02af, 0x6f00, 0, 0, 0, 0, 0, 0), 24, 140},
+	{RTE_IPV6(0xde83, 0x55da, 0x10e5, 0x2ce6, 0xf34c, 0xfa8b, 0x01cb, 0x6c00), 118, 47},
+	{RTE_IPV6(0x65b4, 0x4d8e, 0xc249, 0xc4f6, 0x6b64, 0xc248, 0xcc7c, 0), 111, 148},
+	{RTE_IPV6(0x6000, 0, 0, 0, 0, 0, 0, 0), 3, 103},
+	{RTE_IPV6(0x2e3e, 0xbf82, 0x6e80, 0xeb3e, 0x4427, 0x3a98, 0xcfcc, 0x6000), 116, 94},
+	{RTE_IPV6(0x6f0b, 0x6000, 0, 0, 0, 0, 0, 0), 19, 85},
+	{RTE_IPV6(0x3a2b, 0x0e5d, 0x66d2, 0x75d0, 0xdeab, 0x8229, 0x1010, 0), 109, 250},
+	{RTE_IPV6(0x8d34, 0, 0, 0, 0, 0, 0, 0), 16, 153},
+	{RTE_IPV6(0xaa99, 0xa0aa, 0x90eb, 0x7a08, 0x6a22, 0x1820, 0x6639, 0x0ca8), 125, 182},
+	{RTE_IPV6(0x2271, 0xa36b, 0x3db1, 0x27ac, 0xf202, 0x8200, 0, 0), 94, 23},
+	{RTE_IPV6(0xdebf, 0xef6e, 0xa2bf, 0xc3b5, 0x5032, 0x55f0, 0x5820, 0), 108, 38},
+	{RTE_IPV6(0xb352, 0xfd97, 0xd400, 0x48fd, 0xaf16, 0x224e, 0x3520, 0), 110, 121},
+	{RTE_IPV6(0x0aa2, 0x142e, 0xa440, 0x5801, 0xcacc, 0x7c00, 0, 0), 87, 146},
+	{RTE_IPV6(0xd263, 0, 0, 0, 0, 0, 0, 0), 16, 138},
+	{RTE_IPV6(0xb7c8, 0x0102, 0x3306, 0x428e, 0x144d, 0x30f4, 0, 0), 94, 149},
+	{RTE_IPV6(0x1d14, 0xe039, 0xcca1, 0x83fe, 0x3585, 0xa300, 0, 0), 88, 232},
+	{RTE_IPV6(0x4b3a, 0xaa34, 0x9250, 0, 0, 0, 0, 0), 45, 255},
+	{RTE_IPV6(0x5c15, 0x0171, 0xb958, 0xa000, 0, 0, 0, 0), 51, 148},
+	{RTE_IPV6(0x67b4, 0xdebb, 0x8175, 0, 0, 0, 0, 0), 48, 117},
+	{RTE_IPV6(0x204c, 0, 0, 0, 0, 0, 0, 0), 14, 237},
+	{RTE_IPV6(0x073c, 0x6000, 0, 0, 0, 0, 0, 0), 19, 113},
+	{RTE_IPV6(0xa77a, 0xcdb9, 0x15c7, 0x0600, 0, 0, 0, 0), 57, 162},
+	{RTE_IPV6(0x154e, 0, 0, 0, 0, 0, 0, 0), 21, 225},
+	{RTE_IPV6(0x5c9f, 0xa7a9, 0x88b0, 0x5fff, 0x5789, 0x7010, 0, 0), 92, 210},
+	{RTE_IPV6(0x5478, 0xa000, 0, 0, 0, 0, 0, 0), 22, 34},
+	{RTE_IPV6(0x7e05, 0x7eb0, 0, 0, 0, 0, 0, 0), 29, 224},
+	{RTE_IPV6(0x043a, 0, 0, 0, 0, 0, 0, 0), 16, 143},
+	{RTE_IPV6(0xef9a, 0xb5b6, 0xbdd3, 0xf435, 0x9000, 0, 0, 0), 68, 216},
+	{RTE_IPV6(0xfebc, 0x8ba7, 0x872f, 0x93ef, 0xbb6a, 0xe49c, 0xeaea, 0x6600), 120, 239},
+	{RTE_IPV6(0xe1a8, 0x8a5c, 0xc1ff, 0x2fe9, 0x0b9a, 0xcd56, 0xd158, 0), 111, 54},
+	{RTE_IPV6(0xdf00, 0, 0, 0, 0, 0, 0, 0), 9, 35},
+	{RTE_IPV6(0xebfc, 0x730a, 0x9768, 0xc1cf, 0x26e4, 0xe5f5, 0x2a0d, 0x6c00), 119, 230},
+	{RTE_IPV6(0x0189, 0x3524, 0xd2b8, 0, 0, 0, 0, 0), 45, 234},
+	{RTE_IPV6(0x95b6, 0x48c5, 0x5ce5, 0x090a, 0xdc80, 0x4813, 0x043a, 0xc000), 115, 70},
+	{RTE_IPV6(0x6949, 0x396c, 0, 0, 0, 0, 0, 0), 32, 246},
+	{RTE_IPV6(0xbd3d, 0xe618, 0xeb52, 0x3a66, 0x616f, 0x79fc, 0x9c5e, 0xbfa6), 127, 217},
+	{RTE_IPV6(0xc16c, 0xe756, 0x8c0e, 0xc004, 0x8750, 0x81a6, 0x9e3d, 0xe614), 128, 201},
+	{RTE_IPV6(0x6e4d, 0, 0, 0, 0, 0, 0, 0), 17, 49},
+	{RTE_IPV6(0x0366, 0x24e7, 0x0ff2, 0x8f00, 0, 0, 0, 0), 57, 2},
+	{RTE_IPV6(0x51bd, 0xdca8, 0xc000, 0, 0, 0, 0, 0), 38, 64},
+	{RTE_IPV6(0xa84b, 0x85b4, 0x5ba5, 0x4de8, 0, 0, 0, 0), 62, 239},
+	{RTE_IPV6(0x6ab3, 0xba6d, 0x51ea, 0xe9a7, 0x65a0, 0x5a66, 0xaeea, 0xd000), 116, 47},
+	{RTE_IPV6(0x2e69, 0xea15, 0x17f7, 0xa921, 0x2f05, 0, 0, 0), 80, 43},
+	{RTE_IPV6(0x9890, 0x648e, 0x8117, 0xe332, 0x4351, 0xf974, 0, 0), 94, 17},
+	{RTE_IPV6(0x6d4a, 0x91c0, 0, 0, 0, 0, 0, 0), 26, 5},
+	{RTE_IPV6(0x64f3, 0x16e6, 0x262c, 0x8056, 0x8439, 0, 0, 0), 81, 240},
+	{RTE_IPV6(0x99fb, 0x7341, 0x68b3, 0, 0, 0, 0, 0), 50, 197},
+	{RTE_IPV6(0x2b71, 0x3ce0, 0x2414, 0x2aa1, 0x18df, 0xc000, 0, 0), 84, 192},
+	{RTE_IPV6(0x3d4d, 0x79b0, 0x8a98, 0, 0, 0, 0, 0), 47, 160},
+	{RTE_IPV6(0x77c2, 0x9231, 0x3bf2, 0x19dc, 0x7a68, 0x5000, 0, 0), 84, 199},
+	{RTE_IPV6(0xfea2, 0x9b2f, 0xbb03, 0x0172, 0x8ebf, 0x982c, 0x901a, 0xca00), 127, 217},
+	{RTE_IPV6(0xb001, 0x722a, 0xbf91, 0x2b01, 0x8d12, 0x4000, 0, 0), 83, 75},
+	{RTE_IPV6(0xaaf4, 0x4384, 0x91a3, 0x4cd5, 0x55ed, 0xf816, 0xcf40, 0), 106, 222},
+	{RTE_IPV6(0x66be, 0x3a20, 0x4b0f, 0x59a3, 0x4007, 0xa800, 0, 0), 85, 39},
+	{RTE_IPV6(0x7caa, 0x232f, 0x98e0, 0, 0, 0, 0, 0), 45, 9},
+	{RTE_IPV6(0xc0dd, 0x14e4, 0, 0, 0, 0, 0, 0), 33, 217},
+	{RTE_IPV6(0xd0b2, 0x8800, 0, 0, 0, 0, 0, 0), 25, 142},
+	{RTE_IPV6(0xbc44, 0x4d1e, 0x4499, 0x66b4, 0, 0, 0, 0), 62, 18},
+	{RTE_IPV6(0x72b2, 0x79bc, 0xcde9, 0x234d, 0x22c5, 0x9eae, 0x6500, 0), 104, 180},
+	{RTE_IPV6(0xc362, 0x430c, 0x0d2b, 0x0600, 0, 0, 0, 0), 56, 205},
+	{RTE_IPV6(0x92be, 0x2ade, 0x0e36, 0x1c80, 0, 0, 0, 0), 57, 251},
+	{RTE_IPV6(0xb9ca, 0x9400, 0, 0, 0, 0, 0, 0), 22, 178},
+	{RTE_IPV6(0x8a1e, 0x815f, 0xe0a1, 0x7880, 0, 0, 0, 0), 59, 198},
+	{RTE_IPV6(0x45b5, 0x05e3, 0x4000, 0, 0, 0, 0, 0), 34, 84},
+	{RTE_IPV6(0x5ab4, 0x00a4, 0xe34b, 0xae77, 0x8000, 0, 0, 0), 66, 128},
+	{RTE_IPV6(0x143c, 0x3a77, 0xf5b1, 0xa2ba, 0x0d70, 0xd3ef, 0x8000, 0), 97, 75},
+	{RTE_IPV6(0x9e7c, 0x9d19, 0xe68b, 0x33d4, 0x4c6d, 0xecd2, 0x3000, 0), 101, 192},
+	{RTE_IPV6(0x7d6c, 0xf224, 0x5e0d, 0x246a, 0x5a33, 0x53d9, 0x8397, 0), 114, 60},
+	{RTE_IPV6(0xdeda, 0xa29e, 0x0f35, 0xbfb2, 0, 0, 0, 0), 63, 169},
+	{RTE_IPV6(0x68ca, 0x7f6d, 0x4910, 0x110c, 0, 0, 0, 0), 62, 10},
+	{RTE_IPV6(0xacab, 0xf61a, 0xb022, 0x1698, 0xf638, 0xad78, 0x693c, 0x5c00), 118, 64},
+	{RTE_IPV6(0xbe16, 0xabce, 0x6dba, 0xb380, 0xfdb6, 0x6cd4, 0xdca7, 0xabb4), 127, 182},
+	{RTE_IPV6(0x77d0, 0, 0, 0, 0, 0, 0, 0), 13, 29},
+	{RTE_IPV6(0xa000, 0, 0, 0, 0, 0, 0, 0), 3, 39},
+	{RTE_IPV6(0xaa90, 0x4002, 0x6ba6, 0x8000, 0, 0, 0, 0), 51, 93},
+	{RTE_IPV6(0xea09, 0x6014, 0x9c9d, 0x0122, 0x5800, 0, 0, 0), 75, 228},
+	{RTE_IPV6(0x93ed, 0x1078, 0xc000, 0, 0, 0, 0, 0), 34, 236},
+	{RTE_IPV6(0xb6bd, 0xa29e, 0xdf5a, 0xad00, 0, 0, 0, 0), 56, 190},
+	{RTE_IPV6(0x7494, 0x8ef0, 0x0afd, 0x8000, 0, 0, 0, 0), 49, 217},
+	{RTE_IPV6(0xd349, 0x8c45, 0xfc1b, 0x4b2e, 0x2506, 0x9320, 0, 0), 93, 74},
+	{RTE_IPV6(0x943d, 0x7831, 0xdc41, 0x9600, 0, 0, 0, 0), 61, 180},
+	{RTE_IPV6(0xac23, 0xcab4, 0x814b, 0xf000, 0, 0, 0, 0), 55, 91},
+	{RTE_IPV6(0xd76d, 0x939d, 0x201c, 0x3400, 0, 0, 0, 0), 54, 230},
+	{RTE_IPV6(0x971a, 0xb670, 0xcddc, 0xa000, 0, 0, 0, 0), 52, 175},
+	{RTE_IPV6(0x495b, 0x5d3d, 0xc403, 0x421a, 0x9560, 0, 0, 0), 75, 171},
+	{RTE_IPV6(0xcba3, 0x34f7, 0x1c77, 0x38df, 0x8a46, 0xae61, 0x4d3b, 0x2e00), 120, 202},
+	{RTE_IPV6(0xfb32, 0xe4b2, 0xcac3, 0, 0, 0, 0, 0), 49, 113},
+	{RTE_IPV6(0xd99f, 0xa4c7, 0x0eed, 0xaab8, 0x64e7, 0x5cde, 0, 0), 96, 187},
+	{RTE_IPV6(0x10a1, 0x55c1, 0xca15, 0x039b, 0x3f74, 0x7ccb, 0x220d, 0xd700), 120, 38},
+	{RTE_IPV6(0x6f34, 0x0a00, 0, 0, 0, 0, 0, 0), 26, 35},
+	{RTE_IPV6(0x450c, 0x7497, 0x2000, 0, 0, 0, 0, 0), 38, 115},
+	{RTE_IPV6(0xbb3c, 0x6128, 0x7065, 0x6000, 0, 0, 0, 0), 51, 18},
+	{RTE_IPV6(0xe6c2, 0x88ff, 0xce00, 0, 0, 0, 0, 0), 39, 34},
+	{RTE_IPV6(0xb3ef, 0xaa6b, 0x030d, 0xd443, 0xb145, 0x0800, 0, 0), 87, 75},
+	{RTE_IPV6(0x0b3a, 0x8259, 0x8000, 0, 0, 0, 0, 0), 33, 232},
+	{RTE_IPV6(0xd9b2, 0x2bcb, 0xea14, 0xeaba, 0x9d58, 0x92c0, 0, 0), 91, 154},
+	{RTE_IPV6(0x06b4, 0xe000, 0, 0, 0, 0, 0, 0), 19, 195},
+	{RTE_IPV6(0x9d9a, 0xda9e, 0x27e0, 0x67e6, 0xa400, 0, 0, 0), 70, 122},
+	{RTE_IPV6(0xe10a, 0x8300, 0, 0, 0, 0, 0, 0), 24, 97},
+	{RTE_IPV6(0x1000, 0, 0, 0, 0, 0, 0, 0), 5, 220},
+	{RTE_IPV6(0xa600, 0, 0, 0, 0, 0, 0, 0), 7, 80},
+	{RTE_IPV6(0x1dbe, 0x83d7, 0xe8f6, 0x29e2, 0x34c0, 0, 0, 0), 77, 133},
+	{RTE_IPV6(0x8a4a, 0xa35d, 0x0c00, 0, 0, 0, 0, 0), 38, 93},
+	{RTE_IPV6(0xe540, 0x6129, 0x1cf3, 0xf9b9, 0x6123, 0x311b, 0xaf18, 0), 110, 176},
+	{RTE_IPV6(0x0649, 0x5ea0, 0xbad8, 0x5475, 0xe9a9, 0x92ea, 0, 0), 95, 68},
+	{RTE_IPV6(0xa328, 0xf251, 0xe023, 0x48c2, 0xb04e, 0xe0ae, 0x0c00, 0), 103, 247},
+	{RTE_IPV6(0x02cd, 0x2818, 0, 0, 0, 0, 0, 0), 29, 240},
+	{RTE_IPV6(0xaee1, 0xf0a0, 0xd408, 0xf643, 0x2400, 0, 0, 0), 74, 83},
+	{RTE_IPV6(0x0575, 0xb68d, 0xa6f9, 0xc400, 0, 0, 0, 0), 55, 132},
+	{RTE_IPV6(0x2e98, 0xa900, 0, 0, 0, 0, 0, 0), 24, 217},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 3, 214},
+	{RTE_IPV6(0xe9ca, 0x9fdb, 0, 0, 0, 0, 0, 0), 34, 193},
+	{RTE_IPV6(0xac36, 0x9f05, 0x0ef5, 0x6ab6, 0x0200, 0, 0, 0), 71, 61},
+	{RTE_IPV6(0xf1de, 0xfb72, 0xc800, 0, 0, 0, 0, 0), 39, 65},
+	{RTE_IPV6(0x1ff3, 0xbe04, 0xcfc6, 0xf93b, 0xa77f, 0x5d40, 0, 0), 91, 108},
+	{RTE_IPV6(0xc923, 0xded0, 0, 0, 0, 0, 0, 0), 28, 244},
+	{RTE_IPV6(0xbb69, 0x0d72, 0xeec5, 0x9117, 0xa974, 0x5b1c, 0, 0), 95, 194},
+	{RTE_IPV6(0xfbfb, 0x79a8, 0x98b2, 0x93bc, 0xe57b, 0x9af2, 0xbea5, 0xad30), 124, 82},
+	{RTE_IPV6(0x42bb, 0xbfa4, 0x1fc4, 0x28ba, 0x9473, 0x8639, 0xdefe, 0x3000), 116, 45},
+	{RTE_IPV6(0xd111, 0x6f29, 0x9a5e, 0, 0, 0, 0, 0), 48, 224},
+	{RTE_IPV6(0x28f5, 0x8000, 0, 0, 0, 0, 0, 0), 22, 17},
+	{RTE_IPV6(0x4879, 0x9753, 0xaa50, 0, 0, 0, 0, 0), 44, 133},
+	{RTE_IPV6(0xabac, 0x65ee, 0xc994, 0x1751, 0x040b, 0x4000, 0, 0), 85, 125},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 3, 42},
+	{RTE_IPV6(0x142e, 0x1b5d, 0xc3b8, 0x06a2, 0x6de1, 0x1698, 0, 0), 96, 140},
+	{RTE_IPV6(0xf37a, 0x1e50, 0, 0, 0, 0, 0, 0), 28, 91},
+	{RTE_IPV6(0x59fa, 0x5048, 0x9404, 0, 0, 0, 0, 0), 48, 92},
+	{RTE_IPV6(0xbb98, 0, 0, 0, 0, 0, 0, 0), 13, 125},
+	{RTE_IPV6(0xaca0, 0x8f72, 0x80ef, 0xae85, 0xb09a, 0x9f86, 0x0a00, 0), 106, 249},
+	{RTE_IPV6(0xfeca, 0x7170, 0xad00, 0, 0, 0, 0, 0), 40, 202},
+	{RTE_IPV6(0x5098, 0, 0, 0, 0, 0, 0, 0), 13, 107},
+	{RTE_IPV6(0xded8, 0, 0, 0, 0, 0, 0, 0), 13, 124},
+	{RTE_IPV6(0xdb8a, 0xfd0c, 0xbcc5, 0x3000, 0, 0, 0, 0), 53, 57},
+	{RTE_IPV6(0x7c29, 0xad08, 0xcac0, 0x3dfe, 0xae30, 0xef70, 0, 0), 92, 181},
+	{RTE_IPV6(0xc3ec, 0xf500, 0, 0, 0, 0, 0, 0), 24, 107},
+	{RTE_IPV6(0x5352, 0x2af4, 0x88bf, 0xc551, 0x5b9a, 0xd855, 0x1d96, 0xc616), 128, 101},
+	{RTE_IPV6(0x7000, 0, 0, 0, 0, 0, 0, 0), 4, 102},
+	{RTE_IPV6(0x2c1e, 0xdbf8, 0xd658, 0xe184, 0, 0, 0, 0), 62, 136},
+	{RTE_IPV6(0x29ab, 0xceb2, 0xc350, 0, 0, 0, 0, 0), 45, 114},
+	{RTE_IPV6(0x9f0f, 0xb400, 0, 0, 0, 0, 0, 0), 22, 215},
+	{RTE_IPV6(0x2abc, 0x25ae, 0x5628, 0x0454, 0xaed8, 0, 0, 0), 79, 249},
+	{RTE_IPV6(0xb9e3, 0x55b1, 0xdb5f, 0xfae3, 0x459a, 0x7600, 0, 0), 88, 29},
+	{RTE_IPV6(0x16b9, 0xee64, 0x1910, 0, 0, 0, 0, 0), 44, 71},
+	{RTE_IPV6(0x7a95, 0x754d, 0x58fa, 0xbbcb, 0x8816, 0x552a, 0x69ea, 0x4f08), 127, 112},
+	{RTE_IPV6(0x5d98, 0xe58c, 0, 0, 0, 0, 0, 0), 31, 72},
+	{RTE_IPV6(0x8125, 0xa5a7, 0xf118, 0x2528, 0x0280, 0, 0, 0), 73, 155},
+	{RTE_IPV6(0x1eca, 0xb103, 0xfdca, 0xa4f8, 0, 0, 0, 0), 61, 66},
+	{RTE_IPV6(0xb019, 0xdc78, 0xc2e4, 0x0a2d, 0xe18e, 0xc060, 0, 0), 91, 77},
+	{RTE_IPV6(0x6000, 0, 0, 0, 0, 0, 0, 0), 3, 109},
+	{RTE_IPV6(0x5238, 0x0ccc, 0x3d2d, 0x93f0, 0xdd00, 0, 0, 0), 72, 37},
+	{RTE_IPV6(0xf226, 0xf029, 0x8c4b, 0xfa25, 0xaf73, 0x61e0, 0, 0), 91, 56},
+	{RTE_IPV6(0xfbc0, 0x175a, 0x8738, 0xfc38, 0x4fdb, 0x50a7, 0x1600, 0), 103, 5},
+	{RTE_IPV6(0x3e80, 0x8b80, 0, 0, 0, 0, 0, 0), 25, 15},
+	{RTE_IPV6(0xd601, 0x54e8, 0, 0, 0, 0, 0, 0), 29, 183},
+	{RTE_IPV6(0xcf5a, 0xed89, 0xab8c, 0xe358, 0xfa1a, 0xc5a2, 0xa300, 0), 105, 171},
+	{RTE_IPV6(0xc497, 0xebe8, 0x72f8, 0x01cf, 0xc1b8, 0xba47, 0x9d00, 0), 112, 202},
+	{RTE_IPV6(0x9800, 0, 0, 0, 0, 0, 0, 0), 5, 136},
+	{RTE_IPV6(0x09ae, 0xd3c8, 0x7800, 0, 0, 0, 0, 0), 37, 107},
+	{RTE_IPV6(0x5996, 0x5f1c, 0xd10d, 0x7d9f, 0xfef4, 0x6e00, 0, 0), 87, 193},
+	{RTE_IPV6(0x171c, 0xca0a, 0x5a9e, 0x4000, 0, 0, 0, 0), 52, 4},
+	{RTE_IPV6(0x3019, 0xb409, 0x54ec, 0x0690, 0x1ec6, 0x2938, 0, 0), 96, 68},
+	{RTE_IPV6(0xfc00, 0, 0, 0, 0, 0, 0, 0), 7, 40},
+	{RTE_IPV6(0x14a5, 0x3982, 0xa400, 0, 0, 0, 0, 0), 39, 255},
+	{RTE_IPV6(0xa738, 0x8000, 0, 0, 0, 0, 0, 0), 18, 108},
+	{RTE_IPV6(0x5bcc, 0, 0, 0, 0, 0, 0, 0), 14, 219},
+	{RTE_IPV6(0x182e, 0x0904, 0xaa96, 0x3882, 0x7f78, 0x7668, 0xa830, 0), 108, 12},
+	{RTE_IPV6(0x9c3c, 0xf5f7, 0xbd00, 0, 0, 0, 0, 0), 40, 84},
+	{RTE_IPV6(0x9468, 0xbbae, 0x811c, 0x7fa2, 0x5cde, 0x3412, 0, 0), 96, 33},
+	{RTE_IPV6(0x26fd, 0xb699, 0xe9c2, 0x9f29, 0x5ec1, 0xfea0, 0, 0), 91, 199},
+	{RTE_IPV6(0x9c4d, 0x69eb, 0x91d8, 0, 0, 0, 0, 0), 46, 52},
+	{RTE_IPV6(0x64d3, 0xee93, 0x41de, 0x6349, 0xfc71, 0x2e71, 0x3488, 0), 113, 184},
+	{RTE_IPV6(0x0d1e, 0, 0, 0, 0, 0, 0, 0), 18, 124},
+	{RTE_IPV6(0x1df0, 0x8de6, 0x4eed, 0x1987, 0x8306, 0x414d, 0x4df8, 0), 109, 128},
+	{RTE_IPV6(0x0fc0, 0x6d1f, 0x95dd, 0, 0, 0, 0, 0), 49, 255},
+	{RTE_IPV6(0x50b9, 0xaa47, 0x293a, 0x9e6a, 0xfd07, 0x02b8, 0xad00, 0), 105, 146},
+	{RTE_IPV6(0x10e5, 0xe000, 0, 0, 0, 0, 0, 0), 24, 172},
+	{RTE_IPV6(0xa902, 0x9909, 0xa9cb, 0xf59a, 0xb800, 0, 0, 0), 70, 116},
+	{RTE_IPV6(0x9087, 0xefa4, 0x8ebb, 0x406d, 0, 0, 0, 0), 66, 189},
+	{RTE_IPV6(0xaa4e, 0xfce3, 0xf2c7, 0x82fb, 0xc800, 0, 0, 0), 70, 10},
+	{RTE_IPV6(0xe812, 0x0f7e, 0xa67e, 0x3a19, 0xd13e, 0x4c4f, 0, 0), 98, 184},
+	{RTE_IPV6(0xaa52, 0x4835, 0x8000, 0, 0, 0, 0, 0), 33, 98},
+	{RTE_IPV6(0x9864, 0x257a, 0xf240, 0, 0, 0, 0, 0), 42, 37},
+	{RTE_IPV6(0xaee7, 0xe621, 0x47e0, 0, 0, 0, 0, 0), 46, 174},
+	{RTE_IPV6(0x4ae1, 0xfc99, 0xca08, 0xa227, 0x4000, 0, 0, 0), 67, 251},
+	{RTE_IPV6(0xa7ba, 0x65bb, 0x7a00, 0, 0, 0, 0, 0), 42, 115},
+	{RTE_IPV6(0x5307, 0x157a, 0xf343, 0xab92, 0x91a0, 0xa867, 0xdf40, 0), 107, 252},
+	{RTE_IPV6(0x5384, 0xdb56, 0x5600, 0, 0, 0, 0, 0), 40, 176},
+	{RTE_IPV6(0x1671, 0x4866, 0x4910, 0xec39, 0xc57a, 0x1f00, 0, 0), 91, 155},
+	{RTE_IPV6(0xfa3b, 0x4023, 0x4870, 0x9f55, 0xc805, 0xc127, 0x98b9, 0x9410), 124, 36},
+	{RTE_IPV6(0xdc15, 0x30a4, 0xe079, 0x1145, 0x0a76, 0x6a00, 0, 0), 88, 202},
+	{RTE_IPV6(0xa000, 0, 0, 0, 0, 0, 0, 0), 5, 208},
+	{RTE_IPV6(0xf740, 0x537d, 0xc3e1, 0x324c, 0x1268, 0, 0, 0), 77, 158},
+	{RTE_IPV6(0x4e5b, 0x1fca, 0xbd19, 0x0d85, 0xdc00, 0, 0, 0), 72, 136},
+	{RTE_IPV6(0x69c5, 0x1a50, 0, 0, 0, 0, 0, 0), 28, 191},
+	{RTE_IPV6(0x0e1f, 0x9af2, 0xf1e7, 0x3797, 0xdf38, 0x86ff, 0x71ce, 0x4500), 120, 126},
+	{RTE_IPV6(0xf7c1, 0x3ab0, 0x1047, 0x1f78, 0xd568, 0xe753, 0x1a76, 0x5b87), 128, 139},
+	{RTE_IPV6(0x8820, 0xf000, 0, 0, 0, 0, 0, 0), 25, 216},
+	{RTE_IPV6(0x64ee, 0x7028, 0, 0, 0, 0, 0, 0), 29, 93},
+	{RTE_IPV6(0x5000, 0, 0, 0, 0, 0, 0, 0), 4, 196},
+	{RTE_IPV6(0xe9e0, 0xfe39, 0x21cd, 0x8cd9, 0xb548, 0, 0, 0), 81, 119},
+	{RTE_IPV6(0x6b4b, 0x419e, 0x808e, 0xbfbc, 0xbcf0, 0x94f3, 0x7400, 0), 104, 93},
+	{RTE_IPV6(0x2746, 0x7872, 0x45ed, 0x5f30, 0xe9b0, 0x5b9a, 0, 0), 96, 183},
+	{RTE_IPV6(0x0a3d, 0x2b65, 0x4066, 0xd000, 0, 0, 0, 0), 52, 207},
+	{RTE_IPV6(0x9780, 0, 0, 0, 0, 0, 0, 0), 9, 102},
+	{RTE_IPV6(0xd2f1, 0, 0, 0, 0, 0, 0, 0), 19, 36},
+	{RTE_IPV6(0x34de, 0xf91f, 0x6c89, 0xc701, 0xf2ad, 0xb890, 0, 0), 93, 41},
+	{RTE_IPV6(0x7b6f, 0x58c0, 0x4546, 0, 0, 0, 0, 0), 47, 70},
+	{RTE_IPV6(0xb452, 0xbc7d, 0x8c08, 0xc44a, 0, 0, 0, 0), 63, 218},
+	{RTE_IPV6(0x4d9e, 0x2265, 0xc466, 0x38dc, 0x2a8f, 0xb5bb, 0xf040, 0xa100), 120, 226},
+	{RTE_IPV6(0x58dc, 0xde26, 0x176c, 0x0594, 0xb96e, 0x140e, 0x433d, 0), 114, 25},
+	{RTE_IPV6(0x5a41, 0xdca5, 0xc585, 0x6e5c, 0xe413, 0x0211, 0, 0), 98, 6},
+	{RTE_IPV6(0x2320, 0, 0, 0, 0, 0, 0, 0), 11, 26},
+	{RTE_IPV6(0x677b, 0x31d1, 0xe4e5, 0x9000, 0, 0, 0, 0), 52, 149},
+	{RTE_IPV6(0x32f4, 0x3abf, 0x5f9c, 0, 0, 0, 0, 0), 46, 127},
+	{RTE_IPV6(0x8ca9, 0x4b4d, 0x4e56, 0x2810, 0, 0, 0, 0), 62, 144},
+	{RTE_IPV6(0x63b0, 0xaf53, 0x7232, 0xd600, 0, 0, 0, 0), 56, 213},
+	{RTE_IPV6(0x13d0, 0xd34c, 0x55b0, 0xf740, 0, 0, 0, 0), 58, 115},
+	{RTE_IPV6(0x991c, 0xbc71, 0xd374, 0x07b2, 0x88cd, 0x6000, 0, 0), 83, 146},
+	{RTE_IPV6(0xa0b4, 0xdc80, 0, 0, 0, 0, 0, 0), 26, 58},
+	{RTE_IPV6(0xea06, 0x7013, 0x3d4a, 0, 0, 0, 0, 0), 47, 222},
+	{RTE_IPV6(0x616e, 0x2275, 0x9594, 0, 0, 0, 0, 0), 50, 16},
+	{RTE_IPV6(0x63ad, 0x7749, 0xfa1e, 0x901e, 0x8000, 0, 0, 0), 65, 169},
+	{RTE_IPV6(0xa986, 0x6f59, 0x0900, 0, 0, 0, 0, 0), 40, 175},
+	{RTE_IPV6(0x8650, 0xe32b, 0, 0, 0, 0, 0, 0), 32, 3},
+	{RTE_IPV6(0xe7f3, 0x2350, 0x4bcf, 0x8089, 0x36aa, 0x47ee, 0, 0), 96, 2},
+	{RTE_IPV6(0xbdbe, 0x7987, 0xa000, 0, 0, 0, 0, 0), 36, 193},
+	{RTE_IPV6(0x8f9b, 0xd8c1, 0xefcd, 0xcc99, 0x8fec, 0x4517, 0xc8d3, 0), 118, 151},
+	{RTE_IPV6(0x2001, 0x73f4, 0x21db, 0x6000, 0, 0, 0, 0), 51, 182},
+	{RTE_IPV6(0xdc80, 0, 0, 0, 0, 0, 0, 0), 10, 148},
+	{RTE_IPV6(0xce57, 0x87eb, 0x7480, 0, 0, 0, 0, 0), 42, 53},
+	{RTE_IPV6(0x9880, 0, 0, 0, 0, 0, 0, 0), 11, 87},
+	{RTE_IPV6(0x3a92, 0xbce9, 0xe6ec, 0xc0d6, 0xa880, 0, 0, 0), 73, 235},
+	{RTE_IPV6(0x54dc, 0x5200, 0, 0, 0, 0, 0, 0), 23, 51},
+	{RTE_IPV6(0x6a91, 0x8e2a, 0xbaba, 0x3a01, 0x3062, 0xa583, 0x309c, 0xc000), 116, 11},
+	{RTE_IPV6(0x35db, 0x78f2, 0xa6d6, 0x5182, 0x4000, 0, 0, 0), 68, 28},
+	{RTE_IPV6(0xf078, 0x4ca3, 0x20c5, 0xb5fb, 0x62dc, 0x1de2, 0, 0), 96, 73},
+	{RTE_IPV6(0xeac5, 0x0ca0, 0, 0, 0, 0, 0, 0), 28, 216},
+	{RTE_IPV6(0xbf5e, 0, 0, 0, 0, 0, 0, 0), 16, 99},
+	{RTE_IPV6(0xc808, 0, 0, 0, 0, 0, 0, 0), 18, 35},
+	{RTE_IPV6(0x1d81, 0x2f53, 0x134b, 0x9e01, 0x1c18, 0x1a93, 0x5277, 0x8c64), 127, 195},
+	{RTE_IPV6(0xf1ae, 0x1a35, 0x9870, 0xc886, 0x54bb, 0xb1b0, 0x2a40, 0), 108, 176},
+	{RTE_IPV6(0x4dab, 0x9130, 0xc354, 0xbe24, 0x7ac7, 0x1200, 0, 0), 87, 217},
+	{RTE_IPV6(0x6968, 0x8735, 0xe276, 0xeea9, 0x09fd, 0x84a2, 0xd97b, 0xbf60), 126, 244},
+	{RTE_IPV6(0xa000, 0, 0, 0, 0, 0, 0, 0), 3, 125},
+	{RTE_IPV6(0x2955, 0x8f80, 0x5b89, 0xc000, 0, 0, 0, 0), 50, 219},
+	{RTE_IPV6(0x746e, 0xc000, 0, 0, 0, 0, 0, 0), 18, 165},
+	{RTE_IPV6(0x4bd5, 0x2c10, 0x2b9d, 0x22ab, 0x6275, 0x6d97, 0x053c, 0xe000), 117, 6},
+	{RTE_IPV6(0xe517, 0x743d, 0x508b, 0xc800, 0, 0, 0, 0), 53, 47},
+	{RTE_IPV6(0x537b, 0x4a00, 0, 0, 0, 0, 0, 0), 23, 73},
+	{RTE_IPV6(0x97f3, 0x2dd9, 0xd89e, 0, 0, 0, 0, 0), 47, 98},
+	{RTE_IPV6(0xabb8, 0x6ed3, 0xed72, 0x9000, 0, 0, 0, 0), 52, 21},
+	{RTE_IPV6(0x07f6, 0xc777, 0, 0, 0, 0, 0, 0), 32, 142},
+	{RTE_IPV6(0x672f, 0x4611, 0x1fe8, 0x2c4b, 0x919b, 0x64d8, 0, 0), 93, 34},
+	{RTE_IPV6(0x41aa, 0xa964, 0xa793, 0x8efb, 0x1440, 0, 0, 0), 74, 41},
+	{RTE_IPV6(0xeb06, 0xe5f8, 0x9789, 0x2400, 0, 0, 0, 0), 55, 80},
+	{RTE_IPV6(0x9c27, 0x6000, 0, 0, 0, 0, 0, 0), 22, 11},
+	{RTE_IPV6(0x5cbc, 0x52c0, 0x8ef9, 0xbe80, 0, 0, 0, 0), 58, 254},
+	{RTE_IPV6(0xfdda, 0xb52e, 0x8690, 0, 0, 0, 0, 0), 45, 95},
+	{RTE_IPV6(0xbd13, 0x1ff4, 0x5000, 0, 0, 0, 0, 0), 40, 8},
+	{RTE_IPV6(0x1e74, 0, 0, 0, 0, 0, 0, 0), 14, 212},
+	{RTE_IPV6(0x51e2, 0x0dad, 0x4f7b, 0xdf7c, 0x6c50, 0x53ee, 0, 0), 95, 217},
+	{RTE_IPV6(0x7ed3, 0xce52, 0x93d7, 0x0600, 0, 0, 0, 0), 57, 15},
+	{RTE_IPV6(0x2ae5, 0x87c5, 0xc4f3, 0x5eb5, 0x8522, 0x1000, 0, 0), 84, 66},
+	{RTE_IPV6(0x44d2, 0x9e40, 0, 0, 0, 0, 0, 0), 28, 122},
+	{RTE_IPV6(0xb73f, 0xdf5e, 0x5129, 0xcb14, 0xecd4, 0xdcc7, 0, 0), 97, 12},
+	{RTE_IPV6(0x8392, 0x027d, 0xae2b, 0xe714, 0xc200, 0, 0, 0), 71, 171},
+	{RTE_IPV6(0x1fb4, 0xf69e, 0x1cc0, 0xec27, 0xed37, 0x4ac3, 0xabc0, 0), 106, 42},
+	{RTE_IPV6(0xb30a, 0x4650, 0, 0, 0, 0, 0, 0), 28, 194},
+	{RTE_IPV6(0x9333, 0x55b9, 0xead1, 0xec57, 0x9311, 0x0744, 0x9420, 0), 107, 237},
+	{RTE_IPV6(0xb1b2, 0x0628, 0x2ea6, 0x57c6, 0xd6ea, 0x17e0, 0, 0), 93, 151},
+	{RTE_IPV6(0xc935, 0x2814, 0x3104, 0x268b, 0x85d9, 0xd686, 0x59c8, 0), 109, 238},
+	{RTE_IPV6(0x041a, 0xb525, 0xce81, 0xe920, 0, 0, 0, 0), 59, 128},
+	{RTE_IPV6(0x513a, 0xf800, 0, 0, 0, 0, 0, 0), 26, 227},
+	{RTE_IPV6(0x12ee, 0xfaa1, 0x39f6, 0xd076, 0x0e4c, 0x4919, 0x4116, 0x9878), 127, 138},
+	{RTE_IPV6(0x1f80, 0, 0, 0, 0, 0, 0, 0), 10, 60},
+	{RTE_IPV6(0x73c3, 0x4000, 0, 0, 0, 0, 0, 0), 18, 148},
+	{RTE_IPV6(0x7416, 0x4b21, 0x1081, 0x237c, 0x0a70, 0x1fd5, 0xb56c, 0xb12e), 128, 129},
+	{RTE_IPV6(0x75d6, 0x1450, 0x5333, 0x8000, 0, 0, 0, 0), 49, 202},
+	{RTE_IPV6(0x784b, 0x7c95, 0x787b, 0xf297, 0xb5a4, 0x8000, 0, 0), 81, 88},
+	{RTE_IPV6(0x57ee, 0xa83e, 0x58a6, 0x3468, 0xdba9, 0x5d80, 0, 0), 90, 3},
+	{RTE_IPV6(0xed2c, 0xe092, 0x3455, 0xf5c0, 0x4189, 0x255f, 0x9cb0, 0), 108, 243},
+	{RTE_IPV6(0xd6f1, 0x333f, 0x493d, 0xc1a5, 0x176c, 0, 0, 0), 80, 95},
+	{RTE_IPV6(0x57f2, 0x159d, 0x2dbc, 0x243e, 0x42f3, 0x4000, 0, 0), 87, 255},
+	{RTE_IPV6(0x0061, 0xdc40, 0, 0, 0, 0, 0, 0), 26, 48},
+	{RTE_IPV6(0xe3ce, 0xbd1f, 0xde08, 0xc000, 0, 0, 0, 0), 50, 38},
+	{RTE_IPV6(0xae1b, 0x0010, 0x0d96, 0x217a, 0x9a3b, 0xec23, 0xf8b2, 0x4000), 115, 20},
+	{RTE_IPV6(0x2714, 0x7d45, 0xfc40, 0, 0, 0, 0, 0), 43, 41},
+	{RTE_IPV6(0x8de8, 0x010c, 0x7de5, 0xa80e, 0x7d74, 0xb400, 0, 0), 92, 133},
+	{RTE_IPV6(0x5dee, 0x28e4, 0xfecb, 0xfb06, 0x3c52, 0xf3f2, 0, 0), 95, 189},
+	{RTE_IPV6(0x2c73, 0xc811, 0x92df, 0x73fd, 0x7ece, 0x985a, 0, 0), 95, 151},
+	{RTE_IPV6(0xd53a, 0xebff, 0x06a3, 0x3d0a, 0xe000, 0, 0, 0), 68, 100},
+	{RTE_IPV6(0x1956, 0x8b74, 0xbe58, 0, 0, 0, 0, 0), 49, 118},
+	{RTE_IPV6(0x7128, 0x418d, 0x4000, 0, 0, 0, 0, 0), 34, 164},
+	{RTE_IPV6(0x95cd, 0xc8ba, 0x137e, 0xd7c7, 0x5e25, 0x6420, 0x8000, 0), 98, 71},
+	{RTE_IPV6(0x2700, 0, 0, 0, 0, 0, 0, 0), 9, 251},
+	{RTE_IPV6(0x5157, 0x50ad, 0xa3a6, 0x6800, 0, 0, 0, 0), 57, 51},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 3, 185},
+	{RTE_IPV6(0x8c80, 0, 0, 0, 0, 0, 0, 0), 10, 144},
+	{RTE_IPV6(0x062a, 0x01b2, 0xfa35, 0xbab2, 0x7279, 0xc000, 0, 0), 84, 51},
+	{RTE_IPV6(0x0211, 0xea33, 0xa905, 0xdb95, 0xf5ed, 0x0400, 0, 0), 87, 32},
+	{RTE_IPV6(0x70bb, 0xad11, 0xe5ab, 0xe1aa, 0x0800, 0, 0, 0), 70, 137},
+	{RTE_IPV6(0xcb47, 0x8ced, 0x7160, 0x7b10, 0, 0, 0, 0), 60, 2},
+	{RTE_IPV6(0x638a, 0xcf02, 0xf419, 0xd362, 0, 0, 0, 0), 63, 163},
+	{RTE_IPV6(0x722a, 0x62f6, 0xfc30, 0xe976, 0x3fe2, 0x9de2, 0xc000, 0), 100, 162},
+	{RTE_IPV6(0xa140, 0, 0, 0, 0, 0, 0, 0), 10, 192},
+	{RTE_IPV6(0xe946, 0xf02d, 0xf000, 0, 0, 0, 0, 0), 36, 185},
+	{RTE_IPV6(0x1c7b, 0x1fb0, 0xebe5, 0xa9c0, 0, 0, 0, 0), 59, 51},
+	{RTE_IPV6(0x92c5, 0xf3eb, 0xf338, 0x8c00, 0, 0, 0, 0), 54, 93},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 2, 159},
+	{RTE_IPV6(0x8d5c, 0x0d1b, 0x57f1, 0xab8f, 0xdc00, 0, 0, 0), 72, 189},
+	{RTE_IPV6(0xa497, 0xc000, 0, 0, 0, 0, 0, 0), 21, 248},
+	{RTE_IPV6(0x23bc, 0xf84f, 0x2797, 0xe8d7, 0xf8f5, 0xb990, 0x4e66, 0xad80), 123, 38},
+	{RTE_IPV6(0xc1e8, 0xa63c, 0x3e50, 0xe6e1, 0xa5f0, 0, 0, 0), 76, 167},
+	{RTE_IPV6(0x6de5, 0x769b, 0x2b9a, 0, 0, 0, 0, 0), 51, 28},
+	{RTE_IPV6(0xa03e, 0x3fd4, 0xda8a, 0x9a6c, 0xa37f, 0xc5ed, 0xb72c, 0x8cc0), 125, 37},
+	{RTE_IPV6(0xc425, 0x3392, 0x1a55, 0x351f, 0xd88d, 0x34da, 0x9920, 0), 107, 234},
+	{RTE_IPV6(0xe480, 0, 0, 0, 0, 0, 0, 0), 9, 70},
+	{RTE_IPV6(0x9af8, 0x14f2, 0x9af4, 0x3f11, 0x7934, 0x4654, 0x76d0, 0), 108, 50},
+	{RTE_IPV6(0x2964, 0x1b54, 0x6a70, 0x6000, 0, 0, 0, 0), 51, 171},
+	{RTE_IPV6(0x5163, 0xc58b, 0x1e96, 0xe6d8, 0x51be, 0x54a5, 0x1d40, 0x8000), 113, 236},
+	{RTE_IPV6(0x7000, 0, 0, 0, 0, 0, 0, 0), 4, 3},
+	{RTE_IPV6(0xa477, 0xfd7e, 0xa0f9, 0xb7bf, 0x776f, 0xe000, 0, 0), 86, 64},
+	{RTE_IPV6(0x8a3a, 0xc6fe, 0x00c5, 0x3c5b, 0x84c7, 0xb5fb, 0x4ea0, 0), 108, 213},
+	{RTE_IPV6(0xd159, 0xa8ec, 0x92a9, 0x6400, 0, 0, 0, 0), 54, 15},
+	{RTE_IPV6(0x83d2, 0xd000, 0, 0, 0, 0, 0, 0), 20, 145},
+	{RTE_IPV6(0xa5be, 0x9d07, 0x8305, 0x9300, 0, 0, 0, 0), 57, 27},
+	{RTE_IPV6(0xb3e2, 0x39cc, 0xbb46, 0x3451, 0x77a2, 0xe52a, 0x2fb9, 0x09a2), 127, 75},
+	{RTE_IPV6(0x62eb, 0x9b33, 0x6ba7, 0x7f89, 0xfef6, 0xa2ab, 0xb40d, 0xe900), 123, 76},
+	{RTE_IPV6(0x6b4f, 0x4c5a, 0x5e97, 0x9b1f, 0x2173, 0x13cc, 0x6273, 0), 113, 247},
+	{RTE_IPV6(0x8f2e, 0x1eaf, 0xe000, 0, 0, 0, 0, 0), 43, 121},
+	{RTE_IPV6(0x9b55, 0xd9b4, 0, 0, 0, 0, 0, 0), 30, 214},
+	{RTE_IPV6(0x3a3e, 0x9c00, 0, 0, 0, 0, 0, 0), 26, 221},
+	{RTE_IPV6(0x5c9b, 0x3503, 0x276c, 0x9bc8, 0, 0, 0, 0), 63, 102},
+	{RTE_IPV6(0x4000, 0, 0, 0, 0, 0, 0, 0), 2, 191},
+	{RTE_IPV6(0x3f86, 0xfb3b, 0xc000, 0, 0, 0, 0, 0), 39, 197},
+	{RTE_IPV6(0xea95, 0xdc6a, 0x0090, 0xd680, 0x2366, 0, 0, 0), 79, 106},
 };
 
 #define  NUM_ROUTE_ENTRIES RTE_DIM(large_route_table)
@@ -1067,19 +1068,19 @@ static inline void mask_ip6_prefix(uint8_t *ip_out,
 /* check if IPv6 address ip[] match the rule with IPv6 address ip_rule[]
  * and depth. if matched, return 0, else return -1.
  */
-static inline int check_lpm6_rule(uint8_t *ip,
-	const uint8_t *ip_rule, uint8_t depth)
+static inline int check_lpm6_rule(const struct rte_ipv6_addr *ip,
+	const struct rte_ipv6_addr *ip_rule, uint8_t depth)
 {
 	int k;
 	uint8_t mask;
 
 	for (k = 0; k < 16; k++) {
 		if (depth >= 8) {
-			if (ip[k] != ip_rule[k])
+			if (ip->a[k] != ip_rule->a[k])
 				return -1;
 		} else if (depth > 0) {
 			mask = (uint8_t)((unsigned int)(-1) << (8 - depth));
-			if ((ip[k] & mask) == (ip_rule[k] & mask))
+			if ((ip->a[k] & mask) == (ip_rule->a[k] & mask))
 				return 0;
 			else
 				return -1;
@@ -1098,7 +1099,7 @@ static inline int check_lpm6_rule(uint8_t *ip,
  * if found that some item in rule[] is matched return 0,
  * else return -1;
  */
-static int get_next_hop(uint8_t *ip, uint8_t *next_hop,
+static int get_next_hop(const struct rte_ipv6_addr *ip, uint8_t *next_hop,
 	const struct rules_tbl_entry *rule, int rule_num)
 {
 	int i;
@@ -1107,7 +1108,7 @@ static int get_next_hop(uint8_t *ip, uint8_t *next_hop,
 
 	for (i = 0; i < rule_num; i++) {
 		if (rule[i].depth >= max_depth) {
-			result = check_lpm6_rule(ip, rule[i].ip, rule[i].depth);
+			result = check_lpm6_rule(ip, &rule[i].ip, rule[i].depth);
 			if (result == 0) {
 				*next_hop = rule[i].next_hop;
 				max_depth = rule[i].depth;
@@ -1131,12 +1132,12 @@ static void generate_large_ips_table(int gen_expected_next_hop)
 
 	for (i = 0; i < NUM_IPS_ENTRIES; i++) {
 		for (j = 0; j < 16; j++)
-			large_ips_table[i].ip[j] = rte_rand();
+			large_ips_table[i].ip.a[j] = rte_rand();
 	}
 
 	for (k = j = 0, i = 0; i < NUM_IPS_ENTRIES; i++) {
-		mask_ip6_prefix(large_ips_table[i].ip,
-			large_route_table[j].ip, large_route_table[j].depth);
+		mask_ip6_prefix(large_ips_table[i].ip.a,
+			large_route_table[j].ip.a, large_route_table[j].depth);
 		k++;
 		if (k == (NUM_IPS_ENTRIES / NUM_ROUTE_ENTRIES)) {
 			j++;
@@ -1150,7 +1151,7 @@ static void generate_large_ips_table(int gen_expected_next_hop)
 		return;
 
 	for (k = 0; k < NUM_IPS_ENTRIES; k++)
-		get_next_hop(large_ips_table[k].ip,
+		get_next_hop(&large_ips_table[k].ip,
 				&(large_ips_table[k].next_hop),
 				large_route_table,
 				NUM_ROUTE_ENTRIES);
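
With struct rte_ipv6_addr in place, the per-byte comparison in check_lpm6_rule() can equivalently be written by masking a copy of the address and comparing the prefixes in one go. A minimal standalone sketch, not part of the patch, assuming the rule address is stored pre-masked (as the generated table entries above are):

#include <string.h>
#include <rte_ip6.h>

static inline int
prefix_match(const struct rte_ipv6_addr *ip,
	const struct rte_ipv6_addr *rule, uint8_t depth)
{
	struct rte_ipv6_addr masked = *ip;	/* struct copy, no memcpy() */

	rte_ipv6_addr_mask(&masked, depth);	/* zero the bits past the prefix */
	return memcmp(&masked, rule, sizeof(masked)) == 0 ? 0 : -1;
}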
diff --git a/app/test/test_lpm6_perf.c b/app/test/test_lpm6_perf.c
index c847dcb18375..1860a99ed6f1 100644
--- a/app/test/test_lpm6_perf.c
+++ b/app/test/test_lpm6_perf.c
@@ -82,7 +82,7 @@ test_lpm6_perf(void)
 	begin = rte_rdtsc();
 
 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
-		if (rte_lpm6_add(lpm, large_route_table[i].ip,
+		if (rte_lpm6_add(lpm, &large_route_table[i].ip,
 				large_route_table[i].depth, next_hop_add) == 0)
 			status++;
 	}
@@ -101,7 +101,7 @@ test_lpm6_perf(void)
 		begin = rte_rdtsc();
 
 		for (j = 0; j < NUM_IPS_ENTRIES; j ++) {
-			if (rte_lpm6_lookup(lpm, large_ips_table[j].ip,
+			if (rte_lpm6_lookup(lpm, &large_ips_table[j].ip,
 					&next_hop_return) != 0)
 				count++;
 		}
@@ -117,11 +117,11 @@ test_lpm6_perf(void)
 	total_time = 0;
 	count = 0;
 
-	uint8_t ip_batch[NUM_IPS_ENTRIES][16];
+	struct rte_ipv6_addr ip_batch[NUM_IPS_ENTRIES];
 	int32_t next_hops[NUM_IPS_ENTRIES];
 
 	for (i = 0; i < NUM_IPS_ENTRIES; i++)
-		memcpy(ip_batch[i], large_ips_table[i].ip, 16);
+		ip_batch[i] = large_ips_table[i].ip;
 
 	for (i = 0; i < ITERATIONS; i ++) {
 
@@ -144,7 +144,7 @@ test_lpm6_perf(void)
 
 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
 		/* rte_lpm_delete(lpm, ip, depth) */
-		status += rte_lpm6_delete(lpm, large_route_table[i].ip,
+		status += rte_lpm6_delete(lpm, &large_route_table[i].ip,
 				large_route_table[i].depth);
 	}
 
diff --git a/app/test/test_table_combined.c b/app/test/test_table_combined.c
index 1c2efe649ded..04503baa5180 100644
--- a/app/test/test_table_combined.c
+++ b/app/test/test_table_combined.c
@@ -362,7 +362,7 @@ test_table_lpm_ipv6_combined(void)
 	struct rte_table_lpm_ipv6_key lpm_ipv6_key = {
 		.depth = 16,
 	};
-	memset(lpm_ipv6_key.ip, 0xad, 16);
+	memset(&lpm_ipv6_key.ip, 0xad, 16);
 
 	struct table_packets table_packets;
 
diff --git a/app/test/test_table_tables.c b/app/test/test_table_tables.c
index 26908e6112fc..920aa555cbd2 100644
--- a/app/test/test_table_tables.c
+++ b/app/test/test_table_tables.c
@@ -525,10 +525,10 @@ test_table_lpm_ipv6(void)
 	/* Add */
 	struct rte_table_lpm_ipv6_key lpm_key;
 
-	lpm_key.ip[0] = 0xad;
-	lpm_key.ip[1] = 0xad;
-	lpm_key.ip[2] = 0xad;
-	lpm_key.ip[3] = 0xad;
+	lpm_key.ip.a[0] = 0xad;
+	lpm_key.ip.a[1] = 0xad;
+	lpm_key.ip.a[2] = 0xad;
+	lpm_key.ip.a[3] = 0xad;
 
 	table = rte_table_lpm_ipv6_ops.f_create(&lpm_params, 0, entry_size);
 	if (table == NULL)
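
The table key is filled the same way as before; only the type of the ip member changed. A short sketch with illustrative values:

#include <rte_table_lpm_ipv6.h>

static void
fill_lpm_ipv6_key(struct rte_table_lpm_ipv6_key *key)
{
	const struct rte_ipv6_addr ip =
		RTE_IPV6(0xadad, 0xadad, 0xadad, 0xadad,
			0xadad, 0xadad, 0xadad, 0xadad);

	key->ip = ip;	/* equivalent to the memset(&key->ip, 0xad, 16) above */
	key->depth = 16;
}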
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 830904203c38..0b658fce37f7 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -80,13 +80,6 @@ Deprecation Notices
     - ``struct rte_ipv6_tuple``
   ipsec
     - ``struct rte_ipsec_sadv6_key``
-  lpm
-    - ``rte_lpm6_add()``
-    - ``rte_lpm6_is_rule_present()``
-    - ``rte_lpm6_delete()``
-    - ``rte_lpm6_delete_bulk_func()``
-    - ``rte_lpm6_lookup()``
-    - ``rte_lpm6_lookup_bulk_func()``
   node
     - ``rte_node_ip6_route_add()``
   pipeline
@@ -98,8 +91,6 @@ Deprecation Notices
     - ``rte_rib6_insert()``
     - ``rte_rib6_remove()``
     - ``rte_rib6_get_ip()``
-  table
-    - ``struct rte_table_lpm_ipv6_key``
 
 * net, ethdev: The flow item ``RTE_FLOW_ITEM_TYPE_VXLAN_GPE``
   is replaced with ``RTE_FLOW_ITEM_TYPE_VXLAN``.
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index de24705ef662..c61269a635d5 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -289,8 +289,17 @@ API Changes
 * net: A new IPv6 address structure was introduced to replace ad-hoc ``uint8_t[16]`` arrays.
   The following libraries and symbols were modified:
 
+  lpm
+    - ``rte_lpm6_add()``
+    - ``rte_lpm6_delete()``
+    - ``rte_lpm6_delete_bulk_func()``
+    - ``rte_lpm6_is_rule_present()``
+    - ``rte_lpm6_lookup()``
+    - ``rte_lpm6_lookup_bulk_func()``
   net
     - ``struct rte_ipv6_hdr``
+  table
+    - ``struct rte_table_lpm_ipv6_key``
 
 
 ABI Changes
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 4c0fa5054a2e..1f841028442f 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -184,21 +184,21 @@ struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
  */
 
 struct l3fwd_ipv6_route {
-	uint8_t ip[IPV6_ADDR_LEN];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint8_t if_out;
 };
 
 /* Default l3fwd_ipv6_route_array table. 8< */
 static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
-	{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
-	{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
-	{{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
-	{{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
-	{{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
-	{{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
-	{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
-	{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
+	{RTE_IPV6(0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 0},
+	{RTE_IPV6(0x0201, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 1},
+	{RTE_IPV6(0x0301, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 2},
+	{RTE_IPV6(0x0401, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 3},
+	{RTE_IPV6(0x0501, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 4},
+	{RTE_IPV6(0x0601, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 5},
+	{RTE_IPV6(0x0701, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 6},
+	{RTE_IPV6(0x0801, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 7},
 };
 /* >8 End of default l3fwd_ipv6_route_array table. */
 
@@ -311,7 +310,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
 		ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv6_hdr *);
 
 		/* Find destination port */
-		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr.a,
+		if (rte_lpm6_lookup(rxq->lpm6, &ip_hdr->dst_addr,
 						&next_hop) == 0 &&
 				(enabled_port_mask & 1 << next_hop) != 0) {
 			port_out = next_hop;
@@ -751,7 +750,7 @@ init_routing_table(void)
 			/* populate the LPM6 table */
 			for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
 				ret = rte_lpm6_add(lpm6,
-					l3fwd_ipv6_route_array[i].ip,
+					&l3fwd_ipv6_route_array[i].ip,
 					l3fwd_ipv6_route_array[i].depth,
 					l3fwd_ipv6_route_array[i].if_out);
 
@@ -764,7 +763,7 @@ init_routing_table(void)
 				RTE_LOG(INFO, IP_FRAG, "Socket %i: adding route " IPv6_BYTES_FMT
 						"/%d (port %d)\n",
 					socket,
-					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
+					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip.a),
 					l3fwd_ipv6_route_array[i].depth,
 					l3fwd_ipv6_route_array[i].if_out);
 			}
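
Each RTE_IPV6() argument is one 16-bit address group in network order, so the old byte list {1,1,1,...} and RTE_IPV6(0x0101, ...) describe the same route. A quick self-check, as a sketch:

#include <assert.h>
#include <rte_ip6.h>

static void
check_route_encoding(void)
{
	const struct rte_ipv6_addr ip =
		RTE_IPV6(0x0101, 0x0101, 0x0101, 0x0101,
			0x0101, 0x0101, 0x0101, 0x0101);
	int i;

	for (i = 0; i < RTE_IPV6_ADDR_SIZE; i++)
		assert(ip.a[i] == 1);	/* same bytes as the old {1,1,...,1} */
}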
diff --git a/examples/ip_pipeline/thread.c b/examples/ip_pipeline/thread.c
index 8a912dc1b392..9d8082b73080 100644
--- a/examples/ip_pipeline/thread.c
+++ b/examples/ip_pipeline/thread.c
@@ -2563,7 +2563,7 @@ match_convert(struct table_rule_match *mh,
 			ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
 			ml->lpm_ipv4.depth = mh->match.lpm.depth;
 		} else {
-			memcpy(ml->lpm_ipv6.ip,
+			memcpy(&ml->lpm_ipv6.ip,
 				mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
 			ml->lpm_ipv6.depth = mh->match.lpm.depth;
 		}
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 4da692eb23e6..17ae76d4badb 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -205,21 +205,21 @@ struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
  */
 
 struct l3fwd_ipv6_route {
-	uint8_t ip[IPV6_ADDR_LEN];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint8_t if_out;
 };
 
 /* Default l3fwd_ipv6_route_array table. 8< */
 static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
-	{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
-	{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
-	{{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
-	{{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
-	{{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
-	{{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
-	{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
-	{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
+	{RTE_IPV6(0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 0},
+	{RTE_IPV6(0x0201, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 1},
+	{RTE_IPV6(0x0301, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 2},
+	{RTE_IPV6(0x0401, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 3},
+	{RTE_IPV6(0x0501, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 4},
+	{RTE_IPV6(0x0601, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 5},
+	{RTE_IPV6(0x0701, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 6},
+	{RTE_IPV6(0x0801, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 7},
 };
 /* >8 End of default l3fwd_ipv6_route_array table. */
 
@@ -400,7 +399,7 @@ reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
 		}
 
 		/* Find destination port */
-		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr.a,
+		if (rte_lpm6_lookup(rxq->lpm6, &ip_hdr->dst_addr,
 						&next_hop) == 0 &&
 				(enabled_port_mask & 1 << next_hop) != 0) {
 			dst_port = next_hop;
@@ -797,7 +796,7 @@ init_routing_table(void)
 			/* populate the LPM6 table */
 			for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
 				ret = rte_lpm6_add(lpm6,
-					l3fwd_ipv6_route_array[i].ip,
+					&l3fwd_ipv6_route_array[i].ip,
 					l3fwd_ipv6_route_array[i].depth,
 					l3fwd_ipv6_route_array[i].if_out);
 
@@ -810,7 +809,7 @@ init_routing_table(void)
 				RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv6_BYTES_FMT
 						"/%d (port %d)\n",
 					socket,
-					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
+					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip.a),
 					l3fwd_ipv6_route_array[i].depth,
 					l3fwd_ipv6_route_array[i].if_out);
 			}
diff --git a/examples/ipsec-secgw/ipsec_lpm_neon.h b/examples/ipsec-secgw/ipsec_lpm_neon.h
index 9573f53ae957..752401d9f27f 100644
--- a/examples/ipsec-secgw/ipsec_lpm_neon.h
+++ b/examples/ipsec-secgw/ipsec_lpm_neon.h
@@ -5,6 +5,8 @@
 #ifndef IPSEC_LPM_NEON_H
 #define IPSEC_LPM_NEON_H
 
+#include <rte_ip6.h>
+
 #include <arm_neon.h>
 #include "ipsec_neon.h"
 
@@ -114,7 +116,7 @@ process_single_pkt(struct rt_ctx *rt_ctx, struct rte_mbuf *pkt,
 static inline void
 route6_pkts_neon(struct rt_ctx *rt_ctx, struct rte_mbuf **pkts, int nb_rx)
 {
-	uint8_t dst_ip6[MAX_PKT_BURST][16];
+	struct rte_ipv6_addr dst_ip6[MAX_PKT_BURST];
 	uint16_t dst_port[MAX_PKT_BURST];
 	struct rte_ether_hdr *eth_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
@@ -142,8 +144,7 @@ route6_pkts_neon(struct rt_ctx *rt_ctx, struct rte_mbuf **pkts, int nb_rx)
 			 * required to get the hop
 			 */
 			ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
-			memcpy(&dst_ip6[lpm_pkts][0],
-					ipv6_hdr->dst_addr, 16);
+			dst_ip6[lpm_pkts] = ipv6_hdr->dst_addr;
 			lpm_pkts++;
 		}
 	}
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index dd14226e8140..e0690fc8d9bd 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -298,17 +298,12 @@ route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
 static inline uint16_t
 route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
 {
-	uint8_t dst_ip[16];
-	uint8_t *ip6_dst;
-	uint16_t offset;
+	struct rte_ipv6_hdr *ip;
 	uint32_t hop;
 	int ret;
 
-	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
-	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
-	memcpy(&dst_ip[0], ip6_dst, 16);
-
-	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
+	ip = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *, RTE_ETHER_HDR_LEN);
+	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, &ip->dst_addr, &hop);
 
 	if (ret == 0) {
 		/* We have a hit */
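
The rewritten route6_pkt() shows the general pattern for lookups straight out of the mbuf: cast to the IPv6 header and hand &dst_addr to the LPM, with no staging copy. Roughly (a sketch; UINT32_MAX as a "no route" marker is an assumption, not part of the patch):

#include <rte_ether.h>
#include <rte_ip6.h>
#include <rte_lpm6.h>
#include <rte_mbuf.h>

static uint32_t
lookup_dst(struct rte_lpm6 *lpm, struct rte_mbuf *pkt)
{
	const struct rte_ipv6_hdr *ip6;
	uint32_t hop;

	ip6 = rte_pktmbuf_mtod_offset(pkt, const struct rte_ipv6_hdr *,
			RTE_ETHER_HDR_LEN);
	if (rte_lpm6_lookup(lpm, &ip6->dst_addr, &hop) != 0)
		return UINT32_MAX;	/* assumed "no route" marker */
	return hop;
}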
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 8f6a1c06aa7f..8f9616129362 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -560,7 +560,7 @@ static __rte_always_inline void
 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint32_t nb_pkts)
 {
 	int32_t hop[MAX_PKT_BURST * 2];
-	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+	struct rte_ipv6_addr dst_ip[MAX_PKT_BURST * 2];
 	struct rte_ether_hdr *ethhdr;
 	uint8_t *ip6_dst;
 	uint32_t pkt_hop = 0;
@@ -586,7 +586,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint32_t nb_pkts)
 			offset = offsetof(struct ip6_hdr, ip6_dst);
 			ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
 					offset);
-			memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
+			memcpy(&dst_ip[lpm_pkts], ip6_dst, 16);
 			lpm_pkts++;
 		}
 	}
diff --git a/examples/ipsec-secgw/rt.c b/examples/ipsec-secgw/rt.c
index ce854ccb6018..059fc0c8f28c 100644
--- a/examples/ipsec-secgw/rt.c
+++ b/examples/ipsec-secgw/rt.c
@@ -25,7 +25,7 @@ struct ip4_route {
 };
 
 struct ip6_route {
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint8_t if_out;
 };
@@ -99,7 +99,7 @@ parse_rt_tokens(char **tokens, uint32_t n_tokens,
 					tokens[ti]);
 				if (status->status < 0)
 					return;
-				memcpy(route_ipv6->ip, ip.s6_addr, 16);
+				memcpy(&route_ipv6->ip, ip.s6_addr, 16);
 				route_ipv6->depth = (uint8_t)depth;
 			}
 		}
@@ -183,7 +183,7 @@ rt_init(struct socket_ctx *ctx, int32_t socket_id)
 
 	/* populate the LPM table */
 	for (i = 0; i < nb_rt_ip6; i++) {
-		ret = rte_lpm6_add(lpm6, rt_ip6[i].ip, rt_ip6[i].depth,
+		ret = rte_lpm6_add(lpm6, &rt_ip6[i].ip, rt_ip6[i].depth,
 				rt_ip6[i].if_out);
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s "
@@ -191,14 +191,14 @@ rt_init(struct socket_ctx *ctx, int32_t socket_id)
 
 		printf("LPM6: Adding route "
 			" %hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx/%hhx (%hhx)\n",
-			(uint16_t)((rt_ip6[i].ip[0] << 8) | rt_ip6[i].ip[1]),
-			(uint16_t)((rt_ip6[i].ip[2] << 8) | rt_ip6[i].ip[3]),
-			(uint16_t)((rt_ip6[i].ip[4] << 8) | rt_ip6[i].ip[5]),
-			(uint16_t)((rt_ip6[i].ip[6] << 8) | rt_ip6[i].ip[7]),
-			(uint16_t)((rt_ip6[i].ip[8] << 8) | rt_ip6[i].ip[9]),
-			(uint16_t)((rt_ip6[i].ip[10] << 8) | rt_ip6[i].ip[11]),
-			(uint16_t)((rt_ip6[i].ip[12] << 8) | rt_ip6[i].ip[13]),
-			(uint16_t)((rt_ip6[i].ip[14] << 8) | rt_ip6[i].ip[15]),
+			(uint16_t)((rt_ip6[i].ip.a[0] << 8) | rt_ip6[i].ip.a[1]),
+			(uint16_t)((rt_ip6[i].ip.a[2] << 8) | rt_ip6[i].ip.a[3]),
+			(uint16_t)((rt_ip6[i].ip.a[4] << 8) | rt_ip6[i].ip.a[5]),
+			(uint16_t)((rt_ip6[i].ip.a[6] << 8) | rt_ip6[i].ip.a[7]),
+			(uint16_t)((rt_ip6[i].ip.a[8] << 8) | rt_ip6[i].ip.a[9]),
+			(uint16_t)((rt_ip6[i].ip.a[10] << 8) | rt_ip6[i].ip.a[11]),
+			(uint16_t)((rt_ip6[i].ip.a[12] << 8) | rt_ip6[i].ip.a[13]),
+			(uint16_t)((rt_ip6[i].ip.a[14] << 8) | rt_ip6[i].ip.a[15]),
 			rt_ip6[i].depth, rt_ip6[i].if_out);
 	}
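
Since struct rte_ipv6_addr keeps the bytes in network order, the shift-and-or printf above could also be replaced with inet_ntop() on the .a member; a sketch, not part of the patch:

#include <arpa/inet.h>
#include <stdio.h>
#include <rte_ip6.h>

static void
print_route6(const struct rte_ipv6_addr *ip, uint8_t depth, uint8_t if_out)
{
	char buf[INET6_ADDRSTRLEN];

	if (inet_ntop(AF_INET6, ip->a, buf, sizeof(buf)) != NULL)
		printf("LPM6: Adding route %s/%u (%u)\n",
			buf, (unsigned)depth, (unsigned)if_out);
}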
 
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index a13dc011380d..9bda0ab633e1 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -151,7 +151,7 @@ struct ipv4_l3fwd_lpm_route {
 };
 
 struct ipv6_l3fwd_lpm_route {
-	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE];
+	uint8_t ip[RTE_IPV6_ADDR_SIZE];
 	uint8_t depth;
 	uint8_t if_out;
 };
@@ -1371,7 +1371,7 @@ main(int argc, char **argv)
 
 		dst_port = ipv6_l3fwd_lpm_route_array[i].if_out;
 
-		memcpy(in6.s6_addr, ipv6_l3fwd_lpm_route_array[i].ip, RTE_LPM6_IPV6_ADDR_SIZE);
+		memcpy(in6.s6_addr, ipv6_l3fwd_lpm_route_array[i].ip, RTE_IPV6_ADDR_SIZE);
 		snprintf(route_str, sizeof(route_str), "%s / %d (%d)",
 			 inet_ntop(AF_INET6, &in6, abuf, sizeof(abuf)),
 			 ipv6_l3fwd_lpm_route_array[i].depth,
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 85f862dd5b40..339cd58116a4 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -732,7 +732,7 @@ setup_fib(const int socketid)
 		rte_eth_dev_info_get(route_base_v6[i].if_out,
 				     &dev_info);
 		ret = rte_fib6_add(ipv6_l3fwd_fib_lookup_struct[socketid],
-			route_base_v6[i].ip_8,
+			route_base_v6[i].ip6.a,
 			route_base_v6[i].depth,
 			route_base_v6[i].if_out);
 
@@ -744,7 +744,7 @@ setup_fib(const int socketid)
 					i, socketid);
 		}
 
-		if (inet_ntop(AF_INET6, route_base_v6[i].ip_8,
+		if (inet_ntop(AF_INET6, route_base_v6[i].ip6.a,
 				abuf, sizeof(abuf)) != NULL) {
 			printf("FIB: Adding route %s / %d (%d) [%s]\n", abuf,
 			       route_base_v6[i].depth,
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index 422fdb70054d..fc4f5878fcd8 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -62,7 +62,7 @@ lpm_get_ipv6_dst_port(const struct rte_ipv6_hdr *ipv6_hdr,
 		      uint16_t portid,
 		      struct rte_lpm6 *ipv6_l3fwd_lookup_struct)
 {
-	const uint8_t *dst_ip = ipv6_hdr->dst_addr.a;
+	const struct rte_ipv6_addr *dst_ip = &ipv6_hdr->dst_addr;
 	uint32_t next_hop;
 
 	if (rte_lpm6_lookup(ipv6_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
@@ -122,7 +122,7 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
 
 		return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
-				ipv6_hdr->dst_addr.a, &next_hop) == 0)
+				&ipv6_hdr->dst_addr, &next_hop) == 0)
 				? next_hop : portid);
 
 	}
@@ -635,7 +635,7 @@ setup_lpm(const int socketid)
 		rte_eth_dev_info_get(route_base_v6[i].if_out,
 				     &dev_info);
 		ret = rte_lpm6_add(ipv6_l3fwd_lpm_lookup_struct[socketid],
-			route_base_v6[i].ip_8,
+			&route_base_v6[i].ip6,
 			route_base_v6[i].depth,
 			route_base_v6[i].if_out);
 
@@ -647,7 +647,7 @@ setup_lpm(const int socketid)
 		}
 
 		printf("LPM: Adding route %s / %d (%d) [%s]\n",
-		       inet_ntop(AF_INET6, route_base_v6[i].ip_8, abuf,
+		       inet_ntop(AF_INET6, &route_base_v6[i].ip6, abuf,
 				 sizeof(abuf)),
 		       route_base_v6[i].depth,
 		       route_base_v6[i].if_out, rte_dev_name(dev_info.device));
diff --git a/examples/l3fwd/l3fwd_route.h b/examples/l3fwd/l3fwd_route.h
index 467c4d285915..62263c354054 100644
--- a/examples/l3fwd/l3fwd_route.h
+++ b/examples/l3fwd/l3fwd_route.h
@@ -2,6 +2,8 @@
  * Copyright(c) 2021 Intel Corporation
  */
 
+#include <rte_ip6.h>
+
 /* Log file related character defs. */
 #define COMMENT_LEAD_CHAR	('#')
 #define ROUTE_LEAD_CHAR		('R')
@@ -29,7 +31,7 @@ struct ipv4_l3fwd_route {
 };
 
 struct ipv6_l3fwd_route {
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint8_t if_out;
 };
@@ -53,10 +55,7 @@ struct ipv6_5tuple {
 struct lpm_route_rule {
 	union {
 		uint32_t ip;
-		union {
-			uint32_t ip_32[IPV6_ADDR_U32];
-			uint8_t ip_8[IPV6_ADDR_LEN];
-		};
+		struct rte_ipv6_addr ip6;
 	};
 	uint8_t depth;
 	uint8_t if_out;
diff --git a/examples/l3fwd/lpm_route_parse.c b/examples/l3fwd/lpm_route_parse.c
index f2028d79e180..9c179dc065c0 100644
--- a/examples/l3fwd/lpm_route_parse.c
+++ b/examples/l3fwd/lpm_route_parse.c
@@ -40,7 +40,7 @@ is_bypass_line(const char *buff)
 }
 
 static int
-parse_ipv6_addr_mask(char *token, uint32_t *ipv6, uint8_t *mask)
+parse_ipv6_addr_mask(char *token, struct rte_ipv6_addr *ipv6, uint8_t *mask)
 {
 	char *sa, *sm, *sv;
 	const char *dlm =  "/";
@@ -83,7 +83,7 @@ parse_ipv4_addr_mask(char *token, uint32_t *ipv4, uint8_t *mask)
 }
 
 static int
-lpm_parse_v6_net(char *in, uint32_t *v, uint8_t *mask_len)
+lpm_parse_v6_net(char *in, struct rte_ipv6_addr *v, uint8_t *mask_len)
 {
 	int32_t rc;
 
@@ -108,7 +108,7 @@ lpm_parse_v6_rule(char *str, struct lpm_route_rule *v)
 			return -EINVAL;
 	}
 
-	rc = lpm_parse_v6_net(in[CB_FLD_DST_ADDR], v->ip_32, &v->depth);
+	rc = lpm_parse_v6_net(in[CB_FLD_DST_ADDR], &v->ip6, &v->depth);
 
 	GET_CB_FIELD(in[CB_FLD_IF_OUT], v->if_out, 0, UINT8_MAX, 0);
 
@@ -164,8 +164,7 @@ lpm_add_default_v6_rules(void)
 	route_base_v6 = calloc(route_num_v6, rule_size);
 
 	for (i = 0; i < (unsigned int)route_num_v6; i++) {
-		memcpy(route_base_v6[i].ip_8, ipv6_l3fwd_route_array[i].ip,
-			   sizeof(route_base_v6[i].ip_8));
+		route_base_v6[i].ip6 = ipv6_l3fwd_route_array[i].ip;
 		route_base_v6[i].depth = ipv6_l3fwd_route_array[i].depth;
 		route_base_v6[i].if_out = ipv6_l3fwd_route_array[i].if_out;
 	}
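
For reference, the conversion done by lpm_parse_v6_net() amounts to writing the 16 address bytes into the struct and parsing the prefix length; a hypothetical equivalent built on inet_pton() (not the parser the example actually uses):

#include <arpa/inet.h>
#include <stdlib.h>
#include <rte_ip6.h>

static int
parse_v6_prefix(const char *addr, const char *len,
	struct rte_ipv6_addr *ip, uint8_t *depth)
{
	unsigned long d;
	char *end;

	if (inet_pton(AF_INET6, addr, ip->a) != 1)
		return -1;
	d = strtoul(len, &end, 10);
	if (*end != '\0' || d == 0 || d > RTE_IPV6_MAX_DEPTH)
		return -1;
	*depth = (uint8_t)d;
	return 0;
}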
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 01b763e5ba11..6e2155e0052e 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -228,22 +228,22 @@ const struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
  * 2001:200:0:{0-f}::/64 = Port {0-15}
  */
 const struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
-	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 0},
-	{{32, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 1},
-	{{32, 1, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 2},
-	{{32, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 3},
-	{{32, 1, 2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 4},
-	{{32, 1, 2, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 5},
-	{{32, 1, 2, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 6},
-	{{32, 1, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 7},
-	{{32, 1, 2, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 8},
-	{{32, 1, 2, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 9},
-	{{32, 1, 2, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 10},
-	{{32, 1, 2, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 11},
-	{{32, 1, 2, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 12},
-	{{32, 1, 2, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 13},
-	{{32, 1, 2, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 14},
-	{{32, 1, 2, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 15},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x0, 0, 0, 0, 0), 64, 0},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x1, 0, 0, 0, 0), 64, 1},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x2, 0, 0, 0, 0), 64, 2},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x3, 0, 0, 0, 0), 64, 3},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x4, 0, 0, 0, 0), 64, 4},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x5, 0, 0, 0, 0), 64, 5},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x6, 0, 0, 0, 0), 64, 6},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x7, 0, 0, 0, 0), 64, 7},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x8, 0, 0, 0, 0), 64, 8},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x9, 0, 0, 0, 0), 64, 9},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xa, 0, 0, 0, 0), 64, 10},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xb, 0, 0, 0, 0), 64, 11},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xc, 0, 0, 0, 0), 64, 12},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xd, 0, 0, 0, 0), 64, 13},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xe, 0, 0, 0, 0), 64, 14},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xf, 0, 0, 0, 0), 64, 15},
 };
 
 /*
diff --git a/lib/lpm/meson.build b/lib/lpm/meson.build
index ae30f80b69c8..fae4f79fb938 100644
--- a/lib/lpm/meson.build
+++ b/lib/lpm/meson.build
@@ -20,3 +20,4 @@ indirect_headers += files(
 )
 deps += ['hash']
 deps += ['rcu']
+deps += ['net']
diff --git a/lib/lpm/rte_lpm6.c b/lib/lpm/rte_lpm6.c
index 5bc17601ab71..8beb394c4701 100644
--- a/lib/lpm/rte_lpm6.c
+++ b/lib/lpm/rte_lpm6.c
@@ -67,14 +67,14 @@ struct rte_lpm6_tbl_entry {
 
 /** Rules tbl entry structure. */
 struct rte_lpm6_rule {
-	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
+	struct rte_ipv6_addr ip; /**< Rule IP address. */
 	uint32_t next_hop; /**< Rule next hop. */
 	uint8_t depth; /**< Rule depth. */
 };
 
 /** Rules tbl entry key. */
 struct rte_lpm6_rule_key {
-	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
+	struct rte_ipv6_addr ip; /**< Rule IP address. */
 	uint32_t depth; /**< Rule depth. */
 };
 
@@ -111,37 +111,6 @@ struct rte_lpm6 {
 			/**< LPM tbl8 table. */
 };
 
-/*
- * Takes an array of uint8_t (IPv6 address) and masks it using the depth.
- * It leaves untouched one bit per unit in the depth variable
- * and set the rest to 0.
- */
-static inline void
-ip6_mask_addr(uint8_t *ip, uint8_t depth)
-{
-	int16_t part_depth, mask;
-	int i;
-
-	part_depth = depth;
-
-	for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) {
-		if (part_depth < BYTE_SIZE && part_depth >= 0) {
-			mask = (uint16_t)(~(UINT8_MAX >> part_depth));
-			ip[i] = (uint8_t)(ip[i] & mask);
-		} else if (part_depth < 0)
-			ip[i] = 0;
-
-		part_depth -= BYTE_SIZE;
-	}
-}
-
-/* copy ipv6 address */
-static inline void
-ip6_copy_addr(uint8_t *dst, const uint8_t *src)
-{
-	rte_memcpy(dst, src, RTE_LPM6_IPV6_ADDR_SIZE);
-}
-
 /*
  * LPM6 rule hash function
  *
@@ -213,9 +182,9 @@ tbl8_available(struct rte_lpm6 *lpm)
  *	  note that ip must be already masked
  */
 static inline void
-rule_key_init(struct rte_lpm6_rule_key *key, uint8_t *ip, uint8_t depth)
+rule_key_init(struct rte_lpm6_rule_key *key, const struct rte_ipv6_addr *ip, uint8_t depth)
 {
-	ip6_copy_addr(key->ip, ip);
+	key->ip = *ip;
 	key->depth = depth;
 }
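
The removed private helpers map one-to-one onto generic facilities: ip6_copy_addr() becomes a plain struct assignment (as in rule_key_init() above) and ip6_mask_addr() becomes rte_ipv6_addr_mask(). For instance (a sketch):

#include <rte_ip6.h>

static void
mask_example(void)
{
	struct rte_ipv6_addr ip =
		RTE_IPV6(0x2001, 0x0db8, 0xffff, 0xffff, 0, 0, 0, 0);
	struct rte_ipv6_addr copy = ip;		/* was: ip6_copy_addr() */

	rte_ipv6_addr_mask(&copy, 48);		/* was: ip6_mask_addr() */
	/* copy now holds 2001:db8:ffff:: -> copy.a[6..15] are all zero */
}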
 
@@ -231,7 +200,7 @@ rebuild_lpm(struct rte_lpm6 *lpm)
 
 	while (rte_hash_iterate(lpm->rules_tbl, (void *) &rule_key,
 			(void **) &next_hop, &iter) >= 0)
-		rte_lpm6_add(lpm, rule_key->ip, rule_key->depth,
+		rte_lpm6_add(lpm, &rule_key->ip, rule_key->depth,
 			(uint32_t) next_hop);
 }
 
@@ -460,7 +429,7 @@ rule_find_with_key(struct rte_lpm6 *lpm,
 
 /* Find a rule */
 static int
-rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+rule_find(struct rte_lpm6 *lpm, struct rte_ipv6_addr *ip, uint8_t depth,
 		  uint32_t *next_hop)
 {
 	struct rte_lpm6_rule_key rule_key;
@@ -481,7 +450,7 @@ rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
  *   <0 - error
  */
 static inline int
-rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, uint32_t next_hop)
+rule_add(struct rte_lpm6 *lpm, struct rte_ipv6_addr *ip, uint8_t depth, uint32_t next_hop)
 {
 	int ret, rule_exist;
 	struct rte_lpm6_rule_key rule_key;
@@ -570,7 +539,7 @@ init_tbl8_header(struct rte_lpm6 *lpm, uint32_t tbl_ind,
  * of the bytes being inspected in this step.
  */
 static uint32_t
-get_bitshift(const uint8_t *ip, uint8_t first_byte, uint8_t bytes)
+get_bitshift(const struct rte_ipv6_addr *ip, uint8_t first_byte, uint8_t bytes)
 {
 	uint32_t entry_ind, i;
 	int8_t bitshift;
@@ -581,7 +550,7 @@ get_bitshift(const uint8_t *ip, uint8_t first_byte, uint8_t bytes)
 
 		if (bitshift < 0)
 			bitshift = 0;
-		entry_ind = entry_ind | ip[i-1] << bitshift;
+		entry_ind = entry_ind | ip->a[i-1] << bitshift;
 	}
 
 	return entry_ind;
@@ -596,7 +565,7 @@ get_bitshift(const uint8_t *ip, uint8_t first_byte, uint8_t bytes)
  */
 static inline int
 simulate_add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
-		struct rte_lpm6_tbl_entry **next_tbl, const uint8_t *ip,
+		struct rte_lpm6_tbl_entry **next_tbl, const struct rte_ipv6_addr *ip,
 		uint8_t bytes, uint8_t first_byte, uint8_t depth,
 		uint32_t *need_tbl_nb)
 {
@@ -649,7 +618,7 @@ simulate_add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
 static inline int
 add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
 		uint32_t tbl_ind, struct rte_lpm6_tbl_entry **next_tbl,
-		uint32_t *next_tbl_ind, uint8_t *ip, uint8_t bytes,
+		uint32_t *next_tbl_ind, struct rte_ipv6_addr *ip, uint8_t bytes,
 		uint8_t first_byte, uint8_t depth, uint32_t next_hop,
 		uint8_t is_new_rule)
 {
@@ -814,7 +783,7 @@ add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
  *    -ENOSPC not enough tbl8 left
  */
 static int
-simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth)
+simulate_add(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *masked_ip, uint8_t depth)
 {
 	struct rte_lpm6_tbl_entry *tbl;
 	struct rte_lpm6_tbl_entry *tbl_next = NULL;
@@ -833,7 +802,7 @@ simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth)
 	 * Inspect one by one the rest of the bytes until
 	 * the process is completed.
 	 */
-	for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && ret == 1; i++) {
+	for (i = ADD_FIRST_BYTE; i < RTE_IPV6_ADDR_SIZE && ret == 1; i++) {
 		tbl = tbl_next;
 		ret = simulate_add_step(lpm, tbl, &tbl_next, masked_ip, 1,
 			(uint8_t)(i + 1), depth, &need_tbl_nb);
@@ -851,7 +820,7 @@ simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth)
  * Add a route
  */
 int
-rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
+rte_lpm6_add(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth,
 	     uint32_t next_hop)
 {
 	struct rte_lpm6_tbl_entry *tbl;
@@ -859,24 +828,24 @@ rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
 	/* init to avoid compiler warning */
 	uint32_t tbl_next_num = 123456;
 	int status;
-	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr masked_ip;
 	int i;
 
 	/* Check user arguments. */
-	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
+	if ((lpm == NULL) || (depth < 1) || (depth > RTE_IPV6_MAX_DEPTH))
 		return -EINVAL;
 
 	/* Copy the IP and mask it to avoid modifying user's input data. */
-	ip6_copy_addr(masked_ip, ip);
-	ip6_mask_addr(masked_ip, depth);
+	masked_ip = *ip;
+	rte_ipv6_addr_mask(&masked_ip, depth);
 
 	/* Simulate adding a new route */
-	int ret = simulate_add(lpm, masked_ip, depth);
+	int ret = simulate_add(lpm, &masked_ip, depth);
 	if (ret < 0)
 		return ret;
 
 	/* Add the rule to the rule table. */
-	int is_new_rule = rule_add(lpm, masked_ip, depth, next_hop);
+	int is_new_rule = rule_add(lpm, &masked_ip, depth, next_hop);
 	/* If there is no space available for new rule return error. */
 	if (is_new_rule < 0)
 		return is_new_rule;
@@ -884,7 +853,7 @@ rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
 	/* Inspect the first three bytes through tbl24 on the first step. */
 	tbl = lpm->tbl24;
 	status = add_step(lpm, tbl, TBL24_IND, &tbl_next, &tbl_next_num,
-		masked_ip, ADD_FIRST_BYTE, 1, depth, next_hop,
+		&masked_ip, ADD_FIRST_BYTE, 1, depth, next_hop,
 		is_new_rule);
 	assert(status >= 0);
 
@@ -892,10 +861,10 @@ rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
 	 * Inspect one by one the rest of the bytes until
 	 * the process is completed.
 	 */
-	for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) {
+	for (i = ADD_FIRST_BYTE; i < RTE_IPV6_ADDR_SIZE && status == 1; i++) {
 		tbl = tbl_next;
 		status = add_step(lpm, tbl, tbl_next_num, &tbl_next,
-			&tbl_next_num, masked_ip, 1, (uint8_t)(i + 1),
+			&tbl_next_num, &masked_ip, 1, (uint8_t)(i + 1),
 			depth, next_hop, is_new_rule);
 		assert(status >= 0);
 	}
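
Putting the new signatures together: every entry point now takes a pointer to the address struct, and since rte_lpm6_add() masks a private copy, the caller's prefix may stay const. An end-to-end sketch with illustrative names and sizing (SOCKET_ID_ANY comes from rte_memory.h):

#include <stdio.h>
#include <rte_lpm6.h>
#include <rte_memory.h>

static void
lpm6_flow_example(void)
{
	struct rte_lpm6_config cfg = {
		.max_rules = 1024,	/* illustrative sizing */
		.number_tbl8s = 1 << 16,
	};
	struct rte_lpm6 *lpm = rte_lpm6_create("example", SOCKET_ID_ANY, &cfg);
	const struct rte_ipv6_addr pfx =
		RTE_IPV6(0x2001, 0x0db8, 0, 0, 0, 0, 0, 0);
	uint32_t hop;

	if (lpm == NULL)
		return;
	if (rte_lpm6_add(lpm, &pfx, 32, 7) >= 0 &&
			rte_lpm6_lookup(lpm, &pfx, &hop) == 0)
		printf("next hop %u\n", hop);	/* prints 7 */
	rte_lpm6_free(lpm);
}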
@@ -910,7 +879,7 @@ rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
  */
 static inline int
 lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
-		const struct rte_lpm6_tbl_entry **tbl_next, const uint8_t *ip,
+		const struct rte_lpm6_tbl_entry **tbl_next, const struct rte_ipv6_addr *ip,
 		uint8_t first_byte, uint32_t *next_hop)
 {
 	uint32_t tbl8_index, tbl_entry;
@@ -922,7 +891,7 @@ lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
 	if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {
 
-		tbl8_index = ip[first_byte-1] +
+		tbl8_index = ip->a[first_byte-1] +
 				((tbl_entry & RTE_LPM6_TBL8_BITMASK) *
 				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES);
 
@@ -940,7 +909,7 @@ lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
  * Looks up an IP
  */
 int
-rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip,
+rte_lpm6_lookup(const struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip,
 		uint32_t *next_hop)
 {
 	const struct rte_lpm6_tbl_entry *tbl;
@@ -954,7 +923,7 @@ rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip,
 		return -EINVAL;
 
 	first_byte = LOOKUP_FIRST_BYTE;
-	tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2];
+	tbl24_index = (ip->a[0] << BYTES2_SIZE) | (ip->a[1] << BYTE_SIZE) | ip->a[2];
 
 	/* Calculate pointer to the first entry to be inspected */
 	tbl = &lpm->tbl24[tbl24_index];
@@ -973,7 +942,7 @@ rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip,
  */
 int
 rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
-		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+		struct rte_ipv6_addr *ips,
 		int32_t *next_hops, unsigned int n)
 {
 	unsigned int i;
@@ -989,8 +958,8 @@ rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
 
 	for (i = 0; i < n; i++) {
 		first_byte = LOOKUP_FIRST_BYTE;
-		tbl24_index = (ips[i][0] << BYTES2_SIZE) |
-				(ips[i][1] << BYTE_SIZE) | ips[i][2];
+		tbl24_index = (ips[i].a[0] << BYTES2_SIZE) |
+				(ips[i].a[1] << BYTE_SIZE) | ips[i].a[2];
 
 		/* Calculate pointer to the first entry to be inspected */
 		tbl = &lpm->tbl24[tbl24_index];
@@ -999,7 +968,7 @@ rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
 			/* Continue inspecting following levels
 			 * until success or failure
 			 */
-			status = lookup_step(lpm, tbl, &tbl_next, ips[i],
+			status = lookup_step(lpm, tbl, &tbl_next, &ips[i],
 					first_byte++, &next_hop);
 			tbl = tbl_next;
 		} while (status == 1);
@@ -1017,21 +986,21 @@ rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
  * Look for a rule in the high-level rules table
  */
 int
-rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
+rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth,
 			 uint32_t *next_hop)
 {
-	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr masked_ip;
 
 	/* Check user arguments. */
 	if ((lpm == NULL) || next_hop == NULL || ip == NULL ||
-			(depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
+			(depth < 1) || (depth > RTE_IPV6_MAX_DEPTH))
 		return -EINVAL;
 
 	/* Copy the IP and mask it to avoid modifying user's input data. */
-	ip6_copy_addr(masked_ip, ip);
-	ip6_mask_addr(masked_ip, depth);
+	masked_ip = *ip;
+	rte_ipv6_addr_mask(&masked_ip, depth);
 
-	return rule_find(lpm, masked_ip, depth, next_hop);
+	return rule_find(lpm, &masked_ip, depth, next_hop);
 }
 
 /*
@@ -1042,7 +1011,7 @@ rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
  *   <0 on failure
  */
 static inline int
-rule_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
+rule_delete(struct rte_lpm6 *lpm, struct rte_ipv6_addr *ip, uint8_t depth)
 {
 	int ret;
 	struct rte_lpm6_rule_key rule_key;
@@ -1067,10 +1036,10 @@ rule_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
  */
 int
 rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
-		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths,
+		struct rte_ipv6_addr *ips, uint8_t *depths,
 		unsigned n)
 {
-	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr masked_ip;
 	unsigned i;
 
 	/* Check input arguments. */
@@ -1078,9 +1047,9 @@ rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
 		return -EINVAL;
 
 	for (i = 0; i < n; i++) {
-		ip6_copy_addr(masked_ip, ips[i]);
-		ip6_mask_addr(masked_ip, depths[i]);
-		rule_delete(lpm, masked_ip, depths[i]);
+		masked_ip = ips[i];
+		rte_ipv6_addr_mask(&masked_ip, depths[i]);
+		rule_delete(lpm, &masked_ip, depths[i]);
 	}
 
 	/*
@@ -1141,7 +1110,7 @@ depth_to_mask_1b(uint8_t depth)
  * Find a less specific rule
  */
 static int
-rule_find_less_specific(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+rule_find_less_specific(struct rte_lpm6 *lpm, struct rte_ipv6_addr *ip, uint8_t depth,
 	struct rte_lpm6_rule *rule)
 {
 	int ret;
@@ -1163,12 +1132,12 @@ rule_find_less_specific(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
 			mask = depth_to_mask_1b(mask);
 
 		rule_key.depth = depth;
-		rule_key.ip[depth >> 3] &= mask;
+		rule_key.ip.a[depth >> 3] &= mask;
 
 		ret = rule_find_with_key(lpm, &rule_key, &next_hop);
 		if (ret) {
 			rule->depth = depth;
-			ip6_copy_addr(rule->ip, rule_key.ip);
+			rule->ip = rule_key.ip;
 			rule->next_hop = next_hop;
 			return 1;
 		}
@@ -1181,13 +1150,14 @@ rule_find_less_specific(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
  * Find range of tbl8 cells occupied by a rule
  */
 static void
-rule_find_range(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
+rule_find_range(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth,
 		  struct rte_lpm6_tbl_entry **from,
 		  struct rte_lpm6_tbl_entry **to,
 		  uint32_t *out_tbl_ind)
 {
 	uint32_t ind;
-	uint32_t first_3bytes = (uint32_t)ip[0] << 16 | ip[1] << 8 | ip[2];
+	uint32_t first_3bytes = (uint32_t)ip->a[0] << 16 |
+			ip->a[1] << 8 | ip->a[2];
 
 	if (depth <= 24) {
 		/* rule is within the top level */
@@ -1213,7 +1183,7 @@ rule_find_range(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
 		 * until we reach the last one
 		 */
 		while (depth > 8) {
-			tbl += ip[byte];
+			tbl += ip->a[byte];
 			assert(tbl->ext_entry == 1);
 			/* go to the next level/tbl8 */
 			tbl_ind = tbl->lpm6_tbl8_gindex;
@@ -1224,7 +1194,7 @@ rule_find_range(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
 		}
 
 		/* last level/tbl8 */
-		ind = ip[byte] & depth_to_mask_1b(depth);
+		ind = ip->a[byte] & depth_to_mask_1b(depth);
 		*from = &tbl[ind];
 		ind += (1 << (8 - depth)) - 1;
 		*to = &tbl[ind];
@@ -1288,9 +1258,9 @@ remove_tbl(struct rte_lpm6 *lpm, struct rte_lpm_tbl8_hdr *tbl_hdr,
  * Deletes a rule
  */
 int
-rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth)
+rte_lpm6_delete(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth)
 {
-	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr masked_ip;
 	struct rte_lpm6_rule lsp_rule_obj;
 	struct rte_lpm6_rule *lsp_rule;
 	int ret;
@@ -1298,25 +1268,25 @@ rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth)
 	struct rte_lpm6_tbl_entry *from, *to;
 
 	/* Check input arguments. */
-	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
+	if ((lpm == NULL) || (depth < 1) || (depth > RTE_IPV6_MAX_DEPTH))
 		return -EINVAL;
 
 	/* Copy the IP and mask it to avoid modifying user's input data. */
-	ip6_copy_addr(masked_ip, ip);
-	ip6_mask_addr(masked_ip, depth);
+	masked_ip = *ip;
+	rte_ipv6_addr_mask(&masked_ip, depth);
 
 	/* Delete the rule from the rule table. */
-	ret = rule_delete(lpm, masked_ip, depth);
+	ret = rule_delete(lpm, &masked_ip, depth);
 	if (ret < 0)
 		return -ENOENT;
 
 	/* find rule cells */
-	rule_find_range(lpm, masked_ip, depth, &from, &to, &tbl_ind);
+	rule_find_range(lpm, &masked_ip, depth, &from, &to, &tbl_ind);
 
 	/* find a less specific rule (a rule with smaller depth)
 	 * note: masked_ip will be modified, don't use it anymore
 	 */
-	ret = rule_find_less_specific(lpm, masked_ip, depth,
+	ret = rule_find_less_specific(lpm, &masked_ip, depth,
 			&lsp_rule_obj);
 	lsp_rule = ret ? &lsp_rule_obj : NULL;
 
diff --git a/lib/lpm/rte_lpm6.h b/lib/lpm/rte_lpm6.h
index c93683e6240c..d7ebe9245c8e 100644
--- a/lib/lpm/rte_lpm6.h
+++ b/lib/lpm/rte_lpm6.h
@@ -9,6 +9,9 @@
  * RTE Longest Prefix Match for IPv6 (LPM6)
  */
 
+#include <rte_common.h>
+#include <rte_ip6.h>
+
 #include <stdint.h>
 
 #ifdef __cplusplus
@@ -16,8 +19,8 @@ extern "C" {
 #endif
 
 
-#define RTE_LPM6_MAX_DEPTH               128
-#define RTE_LPM6_IPV6_ADDR_SIZE           16
+#define RTE_LPM6_MAX_DEPTH (RTE_DEPRECATED(RTE_LPM6_MAX_DEPTH) RTE_IPV6_MAX_DEPTH)
+#define RTE_LPM6_IPV6_ADDR_SIZE (RTE_DEPRECATED(RTE_LPM6_IPV6_ADDR_SIZE) RTE_IPV6_ADDR_SIZE)
 /** Max number of characters in LPM name. */
 #define RTE_LPM6_NAMESIZE                 32
 
@@ -92,7 +95,7 @@ rte_lpm6_free(struct rte_lpm6 *lpm);
  *   0 on success, negative value otherwise
  */
 int
-rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
+rte_lpm6_add(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth,
 	     uint32_t next_hop);
 
 /**
@@ -111,7 +114,7 @@ rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
  *   1 if the rule exists, 0 if it does not, a negative value on failure
  */
 int
-rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
+rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth,
 			 uint32_t *next_hop);
 
 /**
@@ -127,7 +130,7 @@ rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
  *   0 on success, negative value otherwise
  */
 int
-rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth);
+rte_lpm6_delete(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth);
 
 /**
  * Delete a rule from the LPM table.
@@ -145,7 +148,7 @@ rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth);
  */
 int
 rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
-		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths, unsigned n);
+		struct rte_ipv6_addr *ips, uint8_t *depths, unsigned int n);
 
 /**
  * Delete all rules from the LPM table.
@@ -169,7 +172,7 @@ rte_lpm6_delete_all(struct rte_lpm6 *lpm);
  *   -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
  */
 int
-rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip, uint32_t *next_hop);
+rte_lpm6_lookup(const struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint32_t *next_hop);
 
 /**
  * Lookup multiple IP addresses in an LPM table.
@@ -189,7 +192,7 @@ rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip, uint32_t *next_ho
  */
 int
 rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
-		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+		struct rte_ipv6_addr *ips,
 		int32_t *next_hops, unsigned int n);
 
 #ifdef __cplusplus
diff --git a/lib/node/ip6_lookup.c b/lib/node/ip6_lookup.c
index 6bbcf14e2aa8..faaea5085938 100644
--- a/lib/node/ip6_lookup.c
+++ b/lib/node/ip6_lookup.c
@@ -74,7 +74,7 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
 	/* Get stream for the speculated next node */
 	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
 	while (n_left_from >= 4) {
-		uint8_t ip_batch[4][16];
+		struct rte_ipv6_addr ip_batch[4];
 		int32_t next_hop[4];
 		uint16_t next[4];
 
@@ -112,28 +112,28 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf0, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[0], &ipv6_hdr->dst_addr, 16);
+		ip_batch[0] = ipv6_hdr->dst_addr;
 
 		/* Extract DIP of mbuf1 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf1, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf1, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[1], &ipv6_hdr->dst_addr, 16);
+		ip_batch[1] = ipv6_hdr->dst_addr;
 
 		/* Extract DIP of mbuf2 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf2, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf2, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[2], &ipv6_hdr->dst_addr, 16);
+		ip_batch[2] = ipv6_hdr->dst_addr;
 
 		/* Extract DIP of mbuf3 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf3, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf3, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[3], &ipv6_hdr->dst_addr, 16);
+		ip_batch[3] = ipv6_hdr->dst_addr;
 
 		rte_lpm6_lookup_bulk_func(lpm6, ip_batch, next_hop, 4);
 
@@ -223,7 +223,7 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
 		/* Extract TTL as IPv6 hdr is in cache */
 		node_mbuf_priv1(mbuf0, dyn)->ttl = ipv6_hdr->hop_limits;
 
-		rc = rte_lpm6_lookup(lpm6, ipv6_hdr->dst_addr.a, &next_hop);
+		rc = rte_lpm6_lookup(lpm6, &ipv6_hdr->dst_addr, &next_hop);
 		next_hop = (rc == 0) ? next_hop : drop_nh;
 
 		node_mbuf_priv1(mbuf0, dyn)->nh = (uint16_t)next_hop;
@@ -267,7 +267,7 @@ rte_node_ip6_route_add(const uint8_t *ip, uint8_t depth, uint16_t next_hop,
 	uint32_t val;
 	int ret;
 
-	memcpy(in6.s6_addr, ip, RTE_LPM6_IPV6_ADDR_SIZE);
+	memcpy(in6.s6_addr, ip, RTE_IPV6_ADDR_SIZE);
 	inet_ntop(AF_INET6, &in6, abuf, sizeof(abuf));
 	/* Embedded next node id into 24 bit next hop */
 	val = ((next_node << 16) | next_hop) & ((1ull << 24) - 1);
@@ -278,8 +278,8 @@ rte_node_ip6_route_add(const uint8_t *ip, uint8_t depth, uint16_t next_hop,
 		if (!ip6_lookup_nm.lpm_tbl[socket])
 			continue;
 
-		ret = rte_lpm6_add(ip6_lookup_nm.lpm_tbl[socket], ip, depth,
-				   val);
+		ret = rte_lpm6_add(ip6_lookup_nm.lpm_tbl[socket],
+				   (const struct rte_ipv6_addr *)ip, depth, val);
 		if (ret < 0) {
 			node_err("ip6_lookup",
 				 "Unable to add entry %s / %d nh (%x) to LPM "
diff --git a/lib/table/rte_table_lpm_ipv6.c b/lib/table/rte_table_lpm_ipv6.c
index c1a7412f92cf..dea11130d3d5 100644
--- a/lib/table/rte_table_lpm_ipv6.c
+++ b/lib/table/rte_table_lpm_ipv6.c
@@ -207,7 +207,7 @@ rte_table_lpm_ipv6_entry_add(
 	}
 
 	/* Check if rule is already present in the table */
-	status = rte_lpm6_is_rule_present(lpm->lpm, ip_prefix->ip,
+	status = rte_lpm6_is_rule_present(lpm->lpm, &ip_prefix->ip,
 		ip_prefix->depth, &nht_pos0);
 	nht_pos0_valid = status > 0;
 
@@ -225,7 +225,7 @@ rte_table_lpm_ipv6_entry_add(
 	}
 
 	/* Add rule to low level LPM table */
-	if (rte_lpm6_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth,
+	if (rte_lpm6_add(lpm->lpm, &ip_prefix->ip, ip_prefix->depth,
 		nht_pos) < 0) {
 		TABLE_LOG(ERR, "%s: LPM IPv6 rule add failed", __func__);
 		return -1;
@@ -270,7 +270,7 @@ rte_table_lpm_ipv6_entry_delete(
 	}
 
 	/* Return if rule is not present in the table */
-	status = rte_lpm6_is_rule_present(lpm->lpm, ip_prefix->ip,
+	status = rte_lpm6_is_rule_present(lpm->lpm, &ip_prefix->ip,
 		ip_prefix->depth, &nht_pos);
 	if (status < 0) {
 		TABLE_LOG(ERR, "%s: LPM IPv6 algorithmic error",
@@ -283,7 +283,7 @@ rte_table_lpm_ipv6_entry_delete(
 	}
 
 	/* Delete rule from the low-level LPM table */
-	status = rte_lpm6_delete(lpm->lpm, ip_prefix->ip, ip_prefix->depth);
+	status = rte_lpm6_delete(lpm->lpm, &ip_prefix->ip, ip_prefix->depth);
 	if (status) {
 		TABLE_LOG(ERR, "%s: LPM IPv6 rule delete failed",
 			__func__);
@@ -323,11 +323,11 @@ rte_table_lpm_ipv6_lookup(
 
 		if (pkt_mask & pkts_mask) {
 			struct rte_mbuf *pkt = pkts[i];
-			uint8_t *ip = RTE_MBUF_METADATA_UINT8_PTR(pkt,
-				lpm->offset);
+			const struct rte_ipv6_addr *ip;
 			int status;
 			uint32_t nht_pos;
 
+			ip = (struct rte_ipv6_addr *)RTE_MBUF_METADATA_UINT8_PTR(pkt, lpm->offset);
 			status = rte_lpm6_lookup(lpm->lpm, ip, &nht_pos);
 			if (status == 0) {
 				pkts_out_mask |= pkt_mask;
diff --git a/lib/table/rte_table_lpm_ipv6.h b/lib/table/rte_table_lpm_ipv6.h
index 166a5ba9ee67..3ea888360635 100644
--- a/lib/table/rte_table_lpm_ipv6.h
+++ b/lib/table/rte_table_lpm_ipv6.h
@@ -39,13 +39,16 @@
 
 #include <stdint.h>
 
+#include <rte_common.h>
+#include <rte_ip6.h>
+
 #include "rte_table.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#define RTE_LPM_IPV6_ADDR_SIZE 16
+#define RTE_LPM_IPV6_ADDR_SIZE (RTE_DEPRECATED(RTE_LPM_IPV6_ADDR_SIZE) RTE_IPV6_ADDR_SIZE)
 
 /** LPM table parameters */
 struct rte_table_lpm_ipv6_params {
@@ -73,7 +76,7 @@ each rule covering for a multitude of lookup keys (destination IP addresses)
 that share the same data (next hop). */
 struct rte_table_lpm_ipv6_key {
 	/** IP address */
-	uint8_t ip[RTE_LPM_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr ip;
 
 	/** IP address depth. The most significant "depth" bits of the IP
 	address specify the network part of the IP address, while the rest of
-- 
2.47.0


^ permalink raw reply	[relevance 1%]

* [PATCH dpdk v4 07/17] rib6: use IPv6 address structure and utils
    2024-10-18  9:17  1%   ` [PATCH dpdk v4 04/17] net: use IPv6 structure for packet headers Robin Jarry
  2024-10-18  9:17  1%   ` [PATCH dpdk v4 05/17] lpm6: use IPv6 address structure and utils Robin Jarry
@ 2024-10-18  9:17  2%   ` Robin Jarry
  2 siblings, 0 replies; 169+ results
From: Robin Jarry @ 2024-10-18  9:17 UTC (permalink / raw)
  To: dev, Vladimir Medvedkin

Replace ad-hoc uint8_t[16] array types in the API of rte_rib6 with
rte_ipv6_addr structures. Replace duplicate functions and macros with
common ones from rte_ip6.h. Update all code accordingly.
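
For callers, the conversion is mechanical: addresses become a typed
struct passed by pointer instead of a raw 16-byte array. A minimal
before/after sketch (illustrative only, not part of this patch; `rib`
is assumed to be a handle obtained from rte_rib6_create()):

	#include <rte_ip6.h>
	#include <rte_rib6.h>

	/* before: ad-hoc byte array holding 2001:db8::/32 */
	uint8_t old_ip[RTE_RIB6_IPV6_ADDR_SIZE] = {
		0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct rte_rib6_node *node = rte_rib6_insert(rib, old_ip, 32);

	/* after: typed address built with the RTE_IPV6() helper */
	struct rte_ipv6_addr new_ip = RTE_IPV6(0x2001, 0x0db8, 0, 0, 0, 0, 0, 0);
	node = rte_rib6_insert(rib, &new_ip, 32);

The typed form also lets callers use plain structure assignment and the
rte_ipv6_addr_eq()/rte_ipv6_addr_mask() helpers instead of the
deprecated rte_rib6_copy_addr(), rte_rib6_is_equal() and get_msk_part().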

Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
 app/test/test_rib6.c                   |  55 ++++++------
 doc/guides/rel_notes/deprecation.rst   |   7 --
 doc/guides/rel_notes/release_24_11.rst |  11 +++
 lib/fib/rte_fib6.c                     |   8 +-
 lib/fib/trie.c                         |  16 ++--
 lib/rib/meson.build                    |   2 +-
 lib/rib/rte_rib6.c                     | 112 ++++++++++---------------
 lib/rib/rte_rib6.h                     |  27 ++++--
 8 files changed, 113 insertions(+), 125 deletions(-)

diff --git a/app/test/test_rib6.c b/app/test/test_rib6.c
index 33596fddb4e5..ba54a3794ea7 100644
--- a/app/test/test_rib6.c
+++ b/app/test/test_rib6.c
@@ -6,7 +6,7 @@
 #include <stdio.h>
 #include <stdint.h>
 #include <stdlib.h>
-#include <rte_ip.h>
+#include <rte_ip6.h>
 #include <rte_rib6.h>
 
 #include "test.h"
@@ -118,14 +118,14 @@ test_insert_invalid(void)
 	struct rte_rib6 *rib = NULL;
 	struct rte_rib6_node *node, *node1;
 	struct rte_rib6_conf config;
-	uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE] = {0};
+	struct rte_ipv6_addr ip = {0};
 	uint8_t depth = 24;
 
 	config.max_nodes = MAX_RULES;
 	config.ext_sz = 0;
 
 	/* rte_rib6_insert: rib == NULL */
-	node = rte_rib6_insert(NULL, ip, depth);
+	node = rte_rib6_insert(NULL, &ip, depth);
 	RTE_TEST_ASSERT(node == NULL,
 		"Call succeeded with invalid parameters\n");
 
@@ -134,14 +134,14 @@ test_insert_invalid(void)
 	RTE_TEST_ASSERT(rib != NULL, "Failed to create RIB\n");
 
 	/* rte_rib6_insert: depth > MAX_DEPTH */
-	node = rte_rib6_insert(rib, ip, MAX_DEPTH + 1);
+	node = rte_rib6_insert(rib, &ip, MAX_DEPTH + 1);
 	RTE_TEST_ASSERT(node == NULL,
 		"Call succeeded with invalid parameters\n");
 
 	/* insert the same ip/depth twice*/
-	node = rte_rib6_insert(rib, ip, depth);
+	node = rte_rib6_insert(rib, &ip, depth);
 	RTE_TEST_ASSERT(node != NULL, "Failed to insert rule\n");
-	node1 = rte_rib6_insert(rib, ip, depth);
+	node1 = rte_rib6_insert(rib, &ip, depth);
 	RTE_TEST_ASSERT(node1 == NULL,
 		"Call succeeded with invalid parameters\n");
 
@@ -162,9 +162,8 @@ test_get_fn(void)
 	struct rte_rib6_node *node;
 	struct rte_rib6_conf config;
 	void *ext;
-	uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE] = {192, 0, 2, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0};
-	uint8_t ip_ret[RTE_RIB6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr ip = RTE_IPV6(0xc000, 0x0200, 0, 0, 0, 0, 0, 0);
+	struct rte_ipv6_addr ip_ret;
 	uint64_t nh_set = 10;
 	uint64_t nh_ret;
 	uint8_t depth = 24;
@@ -177,11 +176,11 @@ test_get_fn(void)
 	rib = rte_rib6_create(__func__, SOCKET_ID_ANY, &config);
 	RTE_TEST_ASSERT(rib != NULL, "Failed to create RIB\n");
 
-	node = rte_rib6_insert(rib, ip, depth);
+	node = rte_rib6_insert(rib, &ip, depth);
 	RTE_TEST_ASSERT(node != NULL, "Failed to insert rule\n");
 
 	/* test rte_rib6_get_ip() with incorrect args */
-	ret = rte_rib6_get_ip(NULL, ip_ret);
+	ret = rte_rib6_get_ip(NULL, &ip_ret);
 	RTE_TEST_ASSERT(ret < 0,
 		"Call succeeded with invalid parameters\n");
 	ret = rte_rib6_get_ip(node, NULL);
@@ -215,8 +214,8 @@ test_get_fn(void)
 		"Call succeeded with invalid parameters\n");
 
 	/* check the return values */
-	ret = rte_rib6_get_ip(node, ip_ret);
-	RTE_TEST_ASSERT((ret == 0) && (rte_rib6_is_equal(ip_ret, ip)),
+	ret = rte_rib6_get_ip(node, &ip_ret);
+	RTE_TEST_ASSERT((ret == 0) && (rte_ipv6_addr_eq(&ip_ret, &ip)),
 		"Failed to get proper node ip\n");
 	ret = rte_rib6_get_depth(node, &depth_ret);
 	RTE_TEST_ASSERT((ret == 0) && (depth_ret == depth),
@@ -243,8 +242,7 @@ test_basic(void)
 	struct rte_rib6_node *node;
 	struct rte_rib6_conf config;
 
-	uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE] = {192, 0, 2, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0};
+	struct rte_ipv6_addr ip = RTE_IPV6(0xc000, 0x0200, 0, 0, 0, 0, 0, 0);
 	uint64_t next_hop_add = 10;
 	uint64_t next_hop_return;
 	uint8_t depth = 24;
@@ -256,21 +254,21 @@ test_basic(void)
 	rib = rte_rib6_create(__func__, SOCKET_ID_ANY, &config);
 	RTE_TEST_ASSERT(rib != NULL, "Failed to create RIB\n");
 
-	node = rte_rib6_insert(rib, ip, depth);
+	node = rte_rib6_insert(rib, &ip, depth);
 	RTE_TEST_ASSERT(node != NULL, "Failed to insert rule\n");
 
 	status = rte_rib6_set_nh(node, next_hop_add);
 	RTE_TEST_ASSERT(status == 0,
 		"Failed to set rte_rib_node field\n");
 
-	node = rte_rib6_lookup(rib, ip);
+	node = rte_rib6_lookup(rib, &ip);
 	RTE_TEST_ASSERT(node != NULL, "Failed to lookup\n");
 
 	status = rte_rib6_get_nh(node, &next_hop_return);
 	RTE_TEST_ASSERT((status == 0) && (next_hop_add == next_hop_return),
 		"Failed to get proper nexthop\n");
 
-	node = rte_rib6_lookup_exact(rib, ip, depth);
+	node = rte_rib6_lookup_exact(rib, &ip, depth);
 	RTE_TEST_ASSERT(node != NULL,
 		"Failed to lookup\n");
 
@@ -278,12 +276,12 @@ test_basic(void)
 	RTE_TEST_ASSERT((status == 0) && (next_hop_add == next_hop_return),
 		"Failed to get proper nexthop\n");
 
-	rte_rib6_remove(rib, ip, depth);
+	rte_rib6_remove(rib, &ip, depth);
 
-	node = rte_rib6_lookup(rib, ip);
+	node = rte_rib6_lookup(rib, &ip);
 	RTE_TEST_ASSERT(node == NULL,
 		"Lookup returns non existent rule\n");
-	node = rte_rib6_lookup_exact(rib, ip, depth);
+	node = rte_rib6_lookup_exact(rib, &ip, depth);
 	RTE_TEST_ASSERT(node == NULL,
 		"Lookup returns non existent rule\n");
 
@@ -299,12 +297,9 @@ test_tree_traversal(void)
 	struct rte_rib6_node *node;
 	struct rte_rib6_conf config;
 
-	uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE] = {10, 0, 2, 130, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0};
-	uint8_t ip1[RTE_RIB6_IPV6_ADDR_SIZE] = {10, 0, 2, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0};
-	uint8_t ip2[RTE_RIB6_IPV6_ADDR_SIZE] = {10, 0, 2, 130, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 80};
+	struct rte_ipv6_addr ip = RTE_IPV6(0x0a00, 0x0282, 0, 0, 0, 0, 0, 0);
+	struct rte_ipv6_addr ip1 = RTE_IPV6(0x0a00, 0x0200, 0, 0, 0, 0, 0, 0);
+	struct rte_ipv6_addr ip2 = RTE_IPV6(0x0a00, 0x0282, 0, 0, 0, 0, 0, 0x0050);
 	uint8_t depth = 126;
 
 	config.max_nodes = MAX_RULES;
@@ -313,13 +308,13 @@ test_tree_traversal(void)
 	rib = rte_rib6_create(__func__, SOCKET_ID_ANY, &config);
 	RTE_TEST_ASSERT(rib != NULL, "Failed to create RIB\n");
 
-	node = rte_rib6_insert(rib, ip1, depth);
+	node = rte_rib6_insert(rib, &ip1, depth);
 	RTE_TEST_ASSERT(node != NULL, "Failed to insert rule\n");
-	node = rte_rib6_insert(rib, ip2, depth);
+	node = rte_rib6_insert(rib, &ip2, depth);
 	RTE_TEST_ASSERT(node != NULL, "Failed to insert rule\n");
 
 	node = NULL;
-	node = rte_rib6_get_nxt(rib, ip, 32, node, RTE_RIB6_GET_NXT_ALL);
+	node = rte_rib6_get_nxt(rib, &ip, 32, node, RTE_RIB6_GET_NXT_ALL);
 	RTE_TEST_ASSERT(node != NULL, "Failed to get rib_node\n");
 
 	rte_rib6_free(rib);
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 582d54aece2f..735542d7a1e2 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -80,13 +80,6 @@ Deprecation Notices
     - ``rte_node_ip6_route_add()``
   pipeline
     - ``struct rte_table_action_ipv6_header``
-  rib
-    - ``rte_rib6_lookup()``
-    - ``rte_rib6_lookup_exact()``
-    - ``rte_rib6_get_nxt()``
-    - ``rte_rib6_insert()``
-    - ``rte_rib6_remove()``
-    - ``rte_rib6_get_ip()``
 
 * net, ethdev: The flow item ``RTE_FLOW_ITEM_TYPE_VXLAN_GPE``
   is replaced with ``RTE_FLOW_ITEM_TYPE_VXLAN``.
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index aada0df483c8..40045b32ef08 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -304,6 +304,17 @@ API Changes
     - ``struct rte_ipv6_hdr``
   table
     - ``struct rte_table_lpm_ipv6_key``
+  rib
+    - ``rte_rib6_get_ip()``
+    - ``rte_rib6_get_nxt()``
+    - ``rte_rib6_insert()``
+    - ``rte_rib6_lookup()``
+    - ``rte_rib6_lookup_exact()``
+    - ``rte_rib6_remove()``
+    - ``RTE_RIB6_IPV6_ADDR_SIZE`` (deprecated, replaced with ``RTE_IPV6_ADDR_SIZE``)
+    - ``get_msk_part()`` (deprecated)
+    - ``rte_rib6_copy_addr()`` (deprecated, replaced with direct structure assignments)
+    - ``rte_rib6_is_equal()`` (deprecated, replaced with ``rte_ipv6_addr_eq()``)
 
 
 ABI Changes
diff --git a/lib/fib/rte_fib6.c b/lib/fib/rte_fib6.c
index ef334da67cc4..9c7d4fde433a 100644
--- a/lib/fib/rte_fib6.c
+++ b/lib/fib/rte_fib6.c
@@ -58,7 +58,7 @@ dummy_lookup(void *fib_p, const struct rte_ipv6_addr *ips,
 	struct rte_rib6_node *node;
 
 	for (i = 0; i < n; i++) {
-		node = rte_rib6_lookup(fib->rib, ips[i].a);
+		node = rte_rib6_lookup(fib->rib, &ips[i]);
 		if (node != NULL)
 			rte_rib6_get_nh(node, &next_hops[i]);
 		else
@@ -74,19 +74,19 @@ dummy_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 	if ((fib == NULL) || (depth > RTE_IPV6_MAX_DEPTH))
 		return -EINVAL;
 
-	node = rte_rib6_lookup_exact(fib->rib, ip->a, depth);
+	node = rte_rib6_lookup_exact(fib->rib, ip, depth);
 
 	switch (op) {
 	case RTE_FIB6_ADD:
 		if (node == NULL)
-			node = rte_rib6_insert(fib->rib, ip->a, depth);
+			node = rte_rib6_insert(fib->rib, ip, depth);
 		if (node == NULL)
 			return -rte_errno;
 		return rte_rib6_set_nh(node, next_hop);
 	case RTE_FIB6_DEL:
 		if (node == NULL)
 			return -ENOENT;
-		rte_rib6_remove(fib->rib, ip->a, depth);
+		rte_rib6_remove(fib->rib, ip, depth);
 		return 0;
 	}
 	return -EINVAL;
diff --git a/lib/fib/trie.c b/lib/fib/trie.c
index 6bb46541feee..4893f6c63615 100644
--- a/lib/fib/trie.c
+++ b/lib/fib/trie.c
@@ -468,13 +468,13 @@ modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib,
 
 	ledge = *ip;
 	do {
-		tmp = rte_rib6_get_nxt(rib, ip->a, depth, tmp,
+		tmp = rte_rib6_get_nxt(rib, ip, depth, tmp,
 			RTE_RIB6_GET_NXT_COVER);
 		if (tmp != NULL) {
 			rte_rib6_get_depth(tmp, &tmp_depth);
 			if (tmp_depth == depth)
 				continue;
-			rte_rib6_get_ip(tmp, redge.a);
+			rte_rib6_get_ip(tmp, &redge);
 			if (rte_ipv6_addr_eq(&ledge, &redge)) {
 				get_nxt_net(&ledge, tmp_depth);
 				continue;
@@ -532,11 +532,11 @@ trie_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 	rte_ipv6_addr_mask(&ip_masked, depth);
 
 	if (depth > 24) {
-		tmp = rte_rib6_get_nxt(rib, ip_masked.a,
+		tmp = rte_rib6_get_nxt(rib, &ip_masked,
 			RTE_ALIGN_FLOOR(depth, 8), NULL,
 			RTE_RIB6_GET_NXT_COVER);
 		if (tmp == NULL) {
-			tmp = rte_rib6_lookup(rib, ip->a);
+			tmp = rte_rib6_lookup(rib, ip);
 			if (tmp != NULL) {
 				rte_rib6_get_depth(tmp, &tmp_depth);
 				parent_depth = RTE_MAX(tmp_depth, 24);
@@ -546,7 +546,7 @@ trie_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 			depth_diff = depth_diff >> 3;
 		}
 	}
-	node = rte_rib6_lookup_exact(rib, ip_masked.a, depth);
+	node = rte_rib6_lookup_exact(rib, &ip_masked, depth);
 	switch (op) {
 	case RTE_FIB6_ADD:
 		if (node != NULL) {
@@ -563,7 +563,7 @@ trie_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 				dp->number_tbl8s - depth_diff))
 			return -ENOSPC;
 
-		node = rte_rib6_insert(rib, ip_masked.a, depth);
+		node = rte_rib6_insert(rib, &ip_masked, depth);
 		if (node == NULL)
 			return -rte_errno;
 		rte_rib6_set_nh(node, next_hop);
@@ -575,7 +575,7 @@ trie_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 		}
 		ret = modify_dp(dp, rib, &ip_masked, depth, next_hop);
 		if (ret != 0) {
-			rte_rib6_remove(rib, ip_masked.a, depth);
+			rte_rib6_remove(rib, &ip_masked, depth);
 			return ret;
 		}
 
@@ -597,7 +597,7 @@ trie_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 
 		if (ret != 0)
 			return ret;
-		rte_rib6_remove(rib, ip->a, depth);
+		rte_rib6_remove(rib, ip, depth);
 
 		dp->rsvd_tbl8s -= depth_diff;
 		return 0;
diff --git a/lib/rib/meson.build b/lib/rib/meson.build
index 7bacbb453592..e98f70848189 100644
--- a/lib/rib/meson.build
+++ b/lib/rib/meson.build
@@ -4,4 +4,4 @@
 
 sources = files('rte_rib.c', 'rte_rib6.c')
 headers = files('rte_rib.h', 'rte_rib6.h')
-deps += ['mempool']
+deps += ['net', 'mempool']
diff --git a/lib/rib/rte_rib6.c b/lib/rib/rte_rib6.c
index 89c8390c63be..84c47fed6d1f 100644
--- a/lib/rib/rte_rib6.c
+++ b/lib/rib/rte_rib6.c
@@ -20,7 +20,6 @@
 #include "rib_log.h"
 
 #define RTE_RIB_VALID_NODE	1
-#define RIB6_MAXDEPTH		128
 /* Maximum length of a RIB6 name. */
 #define RTE_RIB6_NAMESIZE	64
 
@@ -35,7 +34,7 @@ struct rte_rib6_node {
 	struct rte_rib6_node	*right;
 	struct rte_rib6_node	*parent;
 	uint64_t		nh;
-	uint8_t			ip[RTE_RIB6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr	ip;
 	uint8_t			depth;
 	uint8_t			flag;
 	uint64_t ext[];
@@ -62,24 +61,8 @@ is_right_node(const struct rte_rib6_node *node)
 	return node->parent->right == node;
 }
 
-/*
- * Check if ip1 is covered by ip2/depth prefix
- */
-static inline bool
-is_covered(const uint8_t ip1[RTE_RIB6_IPV6_ADDR_SIZE],
-		const uint8_t ip2[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
-{
-	int i;
-
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
-		if ((ip1[i] ^ ip2[i]) & get_msk_part(depth, i))
-			return false;
-
-	return true;
-}
-
 static inline int
-get_dir(const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+get_dir(const struct rte_ipv6_addr *ip, uint8_t depth)
 {
 	uint8_t index, msk;
 
@@ -98,14 +81,14 @@ get_dir(const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
 	 */
 	msk = 1 << (7 - (depth & 7));
 
-	return (ip[index] & msk) != 0;
+	return (ip->a[index] & msk) != 0;
 }
 
 static inline struct rte_rib6_node *
 get_nxt_node(struct rte_rib6_node *node,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
+	const struct rte_ipv6_addr *ip)
 {
-	if (node->depth == RIB6_MAXDEPTH)
+	if (node->depth == RTE_IPV6_MAX_DEPTH)
 		return NULL;
 
 	return (get_dir(ip, node->depth)) ? node->right : node->left;
@@ -133,7 +116,7 @@ node_free(struct rte_rib6 *rib, struct rte_rib6_node *ent)
 
 struct rte_rib6_node *
 rte_rib6_lookup(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
+	const struct rte_ipv6_addr *ip)
 {
 	struct rte_rib6_node *cur;
 	struct rte_rib6_node *prev = NULL;
@@ -144,7 +127,7 @@ rte_rib6_lookup(struct rte_rib6 *rib,
 	}
 	cur = rib->tree;
 
-	while ((cur != NULL) && is_covered(ip, cur->ip, cur->depth)) {
+	while ((cur != NULL) && rte_ipv6_addr_eq_prefix(ip, &cur->ip, cur->depth)) {
 		if (is_valid_node(cur))
 			prev = cur;
 		cur = get_nxt_node(cur, ip);
@@ -169,32 +152,31 @@ rte_rib6_lookup_parent(struct rte_rib6_node *ent)
 
 struct rte_rib6_node *
 rte_rib6_lookup_exact(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+	const struct rte_ipv6_addr *ip, uint8_t depth)
 {
 	struct rte_rib6_node *cur;
-	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];
-	int i;
+	struct rte_ipv6_addr tmp_ip;
 
-	if (unlikely(rib == NULL || ip == NULL || depth > RIB6_MAXDEPTH)) {
+	if (unlikely(rib == NULL || ip == NULL || depth > RTE_IPV6_MAX_DEPTH)) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
 	cur = rib->tree;
 
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
-		tmp_ip[i] = ip[i] & get_msk_part(depth, i);
+	tmp_ip = *ip;
+	rte_ipv6_addr_mask(&tmp_ip, depth);
 
 	while (cur != NULL) {
-		if (rte_rib6_is_equal(cur->ip, tmp_ip) &&
+		if (rte_ipv6_addr_eq(&cur->ip, &tmp_ip) &&
 				(cur->depth == depth) &&
 				is_valid_node(cur))
 			return cur;
 
-		if (!(is_covered(tmp_ip, cur->ip, cur->depth)) ||
+		if (!rte_ipv6_addr_eq_prefix(&tmp_ip, &cur->ip, cur->depth) ||
 				(cur->depth >= depth))
 			break;
 
-		cur = get_nxt_node(cur, tmp_ip);
+		cur = get_nxt_node(cur, &tmp_ip);
 	}
 
 	return NULL;
@@ -207,32 +189,31 @@ rte_rib6_lookup_exact(struct rte_rib6 *rib,
  */
 struct rte_rib6_node *
 rte_rib6_get_nxt(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE],
+	const struct rte_ipv6_addr *ip,
 	uint8_t depth, struct rte_rib6_node *last, int flag)
 {
 	struct rte_rib6_node *tmp, *prev = NULL;
-	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];
-	int i;
+	struct rte_ipv6_addr tmp_ip;
 
-	if (unlikely(rib == NULL || ip == NULL || depth > RIB6_MAXDEPTH)) {
+	if (unlikely(rib == NULL || ip == NULL || depth > RTE_IPV6_MAX_DEPTH)) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
 
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
-		tmp_ip[i] = ip[i] & get_msk_part(depth, i);
+	tmp_ip = *ip;
+	rte_ipv6_addr_mask(&tmp_ip, depth);
 
 	if (last == NULL) {
 		tmp = rib->tree;
 		while ((tmp) && (tmp->depth < depth))
-			tmp = get_nxt_node(tmp, tmp_ip);
+			tmp = get_nxt_node(tmp, &tmp_ip);
 	} else {
 		tmp = last;
 		while ((tmp->parent != NULL) && (is_right_node(tmp) ||
 				(tmp->parent->right == NULL))) {
 			tmp = tmp->parent;
 			if (is_valid_node(tmp) &&
-					(is_covered(tmp->ip, tmp_ip, depth) &&
+					(rte_ipv6_addr_eq_prefix(&tmp->ip, &tmp_ip, depth) &&
 					(tmp->depth > depth)))
 				return tmp;
 		}
@@ -240,7 +221,7 @@ rte_rib6_get_nxt(struct rte_rib6 *rib,
 	}
 	while (tmp) {
 		if (is_valid_node(tmp) &&
-				(is_covered(tmp->ip, tmp_ip, depth) &&
+				(rte_ipv6_addr_eq_prefix(&tmp->ip, &tmp_ip, depth) &&
 				(tmp->depth > depth))) {
 			prev = tmp;
 			if (flag == RTE_RIB6_GET_NXT_COVER)
@@ -253,7 +234,7 @@ rte_rib6_get_nxt(struct rte_rib6 *rib,
 
 void
 rte_rib6_remove(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+	const struct rte_ipv6_addr *ip, uint8_t depth)
 {
 	struct rte_rib6_node *cur, *prev, *child;
 
@@ -286,28 +267,28 @@ rte_rib6_remove(struct rte_rib6 *rib,
 
 struct rte_rib6_node *
 rte_rib6_insert(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+	const struct rte_ipv6_addr *ip, uint8_t depth)
 {
 	struct rte_rib6_node **tmp;
 	struct rte_rib6_node *prev = NULL;
 	struct rte_rib6_node *new_node = NULL;
 	struct rte_rib6_node *common_node = NULL;
-	uint8_t common_prefix[RTE_RIB6_IPV6_ADDR_SIZE];
-	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr common_prefix;
+	struct rte_ipv6_addr tmp_ip;
 	int i, d;
 	uint8_t common_depth, ip_xor;
 
-	if (unlikely((rib == NULL || ip == NULL || depth > RIB6_MAXDEPTH))) {
+	if (unlikely((rib == NULL || ip == NULL || depth > RTE_IPV6_MAX_DEPTH))) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
 
 	tmp = &rib->tree;
 
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
-		tmp_ip[i] = ip[i] & get_msk_part(depth, i);
+	tmp_ip = *ip;
+	rte_ipv6_addr_mask(&tmp_ip, depth);
 
-	new_node = rte_rib6_lookup_exact(rib, tmp_ip, depth);
+	new_node = rte_rib6_lookup_exact(rib, &tmp_ip, depth);
 	if (new_node != NULL) {
 		rte_errno = EEXIST;
 		return NULL;
@@ -321,7 +302,7 @@ rte_rib6_insert(struct rte_rib6 *rib,
 	new_node->left = NULL;
 	new_node->right = NULL;
 	new_node->parent = NULL;
-	rte_rib6_copy_addr(new_node->ip, tmp_ip);
+	new_node->ip = tmp_ip;
 	new_node->depth = depth;
 	new_node->flag = RTE_RIB_VALID_NODE;
 
@@ -340,28 +321,27 @@ rte_rib6_insert(struct rte_rib6 *rib,
 		 * but node with proper search criteria is found.
 		 * Validate intermediate node and return.
 		 */
-		if (rte_rib6_is_equal(tmp_ip, (*tmp)->ip) &&
-				(depth == (*tmp)->depth)) {
+		if (rte_ipv6_addr_eq(&tmp_ip, &(*tmp)->ip) && (depth == (*tmp)->depth)) {
 			node_free(rib, new_node);
 			(*tmp)->flag |= RTE_RIB_VALID_NODE;
 			++rib->cur_routes;
 			return *tmp;
 		}
 
-		if (!is_covered(tmp_ip, (*tmp)->ip, (*tmp)->depth) ||
+		if (!rte_ipv6_addr_eq_prefix(&tmp_ip, &(*tmp)->ip, (*tmp)->depth) ||
 				((*tmp)->depth >= depth)) {
 			break;
 		}
 		prev = *tmp;
 
-		tmp = (get_dir(tmp_ip, (*tmp)->depth)) ? &(*tmp)->right :
+		tmp = (get_dir(&tmp_ip, (*tmp)->depth)) ? &(*tmp)->right :
 				&(*tmp)->left;
 	}
 
 	/* closest node found, new_node should be inserted in the middle */
 	common_depth = RTE_MIN(depth, (*tmp)->depth);
-	for (i = 0, d = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++) {
-		ip_xor = tmp_ip[i] ^ (*tmp)->ip[i];
+	for (i = 0, d = 0; i < RTE_IPV6_ADDR_SIZE; i++) {
+		ip_xor = tmp_ip.a[i] ^ (*tmp)->ip.a[i];
 		if (ip_xor == 0)
 			d += 8;
 		else {
@@ -372,13 +352,13 @@ rte_rib6_insert(struct rte_rib6 *rib,
 
 	common_depth = RTE_MIN(d, common_depth);
 
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
-		common_prefix[i] = tmp_ip[i] & get_msk_part(common_depth, i);
+	common_prefix = tmp_ip;
+	rte_ipv6_addr_mask(&common_prefix, common_depth);
 
-	if (rte_rib6_is_equal(common_prefix, tmp_ip) &&
+	if (rte_ipv6_addr_eq(&common_prefix, &tmp_ip) &&
 			(common_depth == depth)) {
 		/* insert as a parent */
-		if (get_dir((*tmp)->ip, depth))
+		if (get_dir(&(*tmp)->ip, depth))
 			new_node->right = *tmp;
 		else
 			new_node->left = *tmp;
@@ -393,13 +373,13 @@ rte_rib6_insert(struct rte_rib6 *rib,
 			rte_errno = ENOMEM;
 			return NULL;
 		}
-		rte_rib6_copy_addr(common_node->ip, common_prefix);
+		common_node->ip = common_prefix;
 		common_node->depth = common_depth;
 		common_node->flag = 0;
 		common_node->parent = (*tmp)->parent;
 		new_node->parent = common_node;
 		(*tmp)->parent = common_node;
-		if (get_dir((*tmp)->ip, common_depth) == 1) {
+		if (get_dir(&(*tmp)->ip, common_depth) == 1) {
 			common_node->left = new_node;
 			common_node->right = *tmp;
 		} else {
@@ -414,13 +394,13 @@ rte_rib6_insert(struct rte_rib6 *rib,
 
 int
 rte_rib6_get_ip(const struct rte_rib6_node *node,
-		uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
+		struct rte_ipv6_addr *ip)
 {
 	if (unlikely(node == NULL || ip == NULL)) {
 		rte_errno = EINVAL;
 		return -1;
 	}
-	rte_rib6_copy_addr(ip, node->ip);
+	*ip = node->ip;
 	return 0;
 }
 
@@ -604,7 +584,7 @@ rte_rib6_free(struct rte_rib6 *rib)
 
 	while ((tmp = rte_rib6_get_nxt(rib, 0, 0, tmp,
 			RTE_RIB6_GET_NXT_ALL)) != NULL)
-		rte_rib6_remove(rib, tmp->ip, tmp->depth);
+		rte_rib6_remove(rib, &tmp->ip, tmp->depth);
 
 	rte_mempool_free(rib->node_pool);
 
diff --git a/lib/rib/rte_rib6.h b/lib/rib/rte_rib6.h
index 775286f965f2..a60756f798d8 100644
--- a/lib/rib/rte_rib6.h
+++ b/lib/rib/rte_rib6.h
@@ -16,12 +16,13 @@
 
 #include <rte_memcpy.h>
 #include <rte_common.h>
+#include <rte_ip6.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#define RTE_RIB6_IPV6_ADDR_SIZE	16
+#define RTE_RIB6_IPV6_ADDR_SIZE (RTE_DEPRECATED(RTE_RIB6_IPV6_ADDR_SIZE) RTE_IPV6_ADDR_SIZE)
 
 /**
  * rte_rib6_get_nxt() flags
@@ -56,12 +57,15 @@ struct rte_rib6_conf {
  * @param src
  *  pointer from where to copy
  */
+static inline void rte_rib6_copy_addr(uint8_t *dst, const uint8_t *src)
+	__rte_deprecated_msg("use direct struct assignment");
+
 static inline void
 rte_rib6_copy_addr(uint8_t *dst, const uint8_t *src)
 {
 	if ((dst == NULL) || (src == NULL))
 		return;
-	rte_memcpy(dst, src, RTE_RIB6_IPV6_ADDR_SIZE);
+	rte_memcpy(dst, src, RTE_IPV6_ADDR_SIZE);
 }
 
 /**
@@ -76,13 +80,16 @@ rte_rib6_copy_addr(uint8_t *dst, const uint8_t *src)
  *  1 if equal
  *  0 otherwise
  */
+static inline int rte_rib6_is_equal(const uint8_t *ip1, const uint8_t *ip2)
+	__rte_deprecated_msg("use rte_ipv6_addr_eq");
+
 static inline int
 rte_rib6_is_equal(const uint8_t *ip1, const uint8_t *ip2) {
 	int i;
 
 	if ((ip1 == NULL) || (ip2 == NULL))
 		return 0;
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++) {
+	for (i = 0; i < RTE_IPV6_ADDR_SIZE; i++) {
 		if (ip1[i] != ip2[i])
 			return 0;
 	}
@@ -100,6 +107,8 @@ rte_rib6_is_equal(const uint8_t *ip1, const uint8_t *ip2) {
  * @return
  *  8-bit chunk of the 128-bit IPv6 mask
  */
+static inline uint8_t get_msk_part(uint8_t depth, int byte) __rte_deprecated;
+
 static inline uint8_t
 get_msk_part(uint8_t depth, int byte) {
 	uint8_t part;
@@ -124,7 +133,7 @@ get_msk_part(uint8_t depth, int byte) {
  */
 struct rte_rib6_node *
 rte_rib6_lookup(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE]);
+	const struct rte_ipv6_addr *ip);
 
 /**
  * Lookup less specific route into the RIB structure
@@ -154,7 +163,7 @@ rte_rib6_lookup_parent(struct rte_rib6_node *ent);
  */
 struct rte_rib6_node *
 rte_rib6_lookup_exact(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth);
+	const struct rte_ipv6_addr *ip, uint8_t depth);
 
 /**
  * Retrieve next more specific prefix from the RIB
@@ -181,7 +190,7 @@ rte_rib6_lookup_exact(struct rte_rib6 *rib,
  */
 struct rte_rib6_node *
 rte_rib6_get_nxt(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE],
+	const struct rte_ipv6_addr *ip,
 	uint8_t depth, struct rte_rib6_node *last, int flag);
 
 /**
@@ -196,7 +205,7 @@ rte_rib6_get_nxt(struct rte_rib6 *rib,
  */
 void
 rte_rib6_remove(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth);
+	const struct rte_ipv6_addr *ip, uint8_t depth);
 
 /**
  * Insert prefix into the RIB
@@ -213,7 +222,7 @@ rte_rib6_remove(struct rte_rib6 *rib,
  */
 struct rte_rib6_node *
 rte_rib6_insert(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth);
+	const struct rte_ipv6_addr *ip, uint8_t depth);
 
 /**
  * Get an ip from rte_rib6_node
@@ -228,7 +237,7 @@ rte_rib6_insert(struct rte_rib6 *rib,
  */
 int
 rte_rib6_get_ip(const struct rte_rib6_node *node,
-		uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE]);
+		struct rte_ipv6_addr *ip);
 
 /**
  * Get a depth from rte_rib6_node
-- 
2.47.0


^ permalink raw reply	[relevance 2%]

* [PATCH dpdk v5 07/17] rib6: use IPv6 address structure and utils
    2024-10-18 14:05  1%   ` [PATCH dpdk v5 04/17] net: use IPv6 structure for packet headers Robin Jarry
  2024-10-18 14:05  1%   ` [PATCH dpdk v5 05/17] lpm6: use IPv6 address structure and utils Robin Jarry
@ 2024-10-18 14:05  2%   ` Robin Jarry
  2 siblings, 0 replies; 169+ results
From: Robin Jarry @ 2024-10-18 14:05 UTC (permalink / raw)
  To: dev, Vladimir Medvedkin

Replace ad-hoc uint8_t[16] array types in the API of rte_rib6 with
rte_ipv6_addr structures. Replace duplicate functions and macros with
common ones from rte_ip6.h. Update all code accordingly.
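
The per-byte masking loop that appeared throughout rte_rib6.c is
replaced by a structure copy followed by rte_ipv6_addr_mask(), as the
hunks below show. A minimal before/after sketch of that recurring
pattern (illustrative; `ip` and `depth` stand for the function
arguments):

	/* before: mask each byte with the now-deprecated get_msk_part() */
	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];
	int i;

	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
		tmp_ip[i] = ip[i] & get_msk_part(depth, i);

	/* after: copy the struct, then clear the host bits in place */
	struct rte_ipv6_addr tmp_ip = *ip;
	rte_ipv6_addr_mask(&tmp_ip, depth);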

Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
 app/test/test_rib6.c                   |  55 ++++++------
 doc/guides/rel_notes/deprecation.rst   |   7 --
 doc/guides/rel_notes/release_24_11.rst |  11 +++
 lib/fib/rte_fib6.c                     |   8 +-
 lib/fib/trie.c                         |  16 ++--
 lib/rib/meson.build                    |   2 +-
 lib/rib/rte_rib6.c                     | 112 ++++++++++---------------
 lib/rib/rte_rib6.h                     |  27 ++++--
 8 files changed, 113 insertions(+), 125 deletions(-)

diff --git a/app/test/test_rib6.c b/app/test/test_rib6.c
index 33596fddb4e5..ba54a3794ea7 100644
--- a/app/test/test_rib6.c
+++ b/app/test/test_rib6.c
@@ -6,7 +6,7 @@
 #include <stdio.h>
 #include <stdint.h>
 #include <stdlib.h>
-#include <rte_ip.h>
+#include <rte_ip6.h>
 #include <rte_rib6.h>
 
 #include "test.h"
@@ -118,14 +118,14 @@ test_insert_invalid(void)
 	struct rte_rib6 *rib = NULL;
 	struct rte_rib6_node *node, *node1;
 	struct rte_rib6_conf config;
-	uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE] = {0};
+	struct rte_ipv6_addr ip = {0};
 	uint8_t depth = 24;
 
 	config.max_nodes = MAX_RULES;
 	config.ext_sz = 0;
 
 	/* rte_rib6_insert: rib == NULL */
-	node = rte_rib6_insert(NULL, ip, depth);
+	node = rte_rib6_insert(NULL, &ip, depth);
 	RTE_TEST_ASSERT(node == NULL,
 		"Call succeeded with invalid parameters\n");
 
@@ -134,14 +134,14 @@ test_insert_invalid(void)
 	RTE_TEST_ASSERT(rib != NULL, "Failed to create RIB\n");
 
 	/* rte_rib6_insert: depth > MAX_DEPTH */
-	node = rte_rib6_insert(rib, ip, MAX_DEPTH + 1);
+	node = rte_rib6_insert(rib, &ip, MAX_DEPTH + 1);
 	RTE_TEST_ASSERT(node == NULL,
 		"Call succeeded with invalid parameters\n");
 
 	/* insert the same ip/depth twice*/
-	node = rte_rib6_insert(rib, ip, depth);
+	node = rte_rib6_insert(rib, &ip, depth);
 	RTE_TEST_ASSERT(node != NULL, "Failed to insert rule\n");
-	node1 = rte_rib6_insert(rib, ip, depth);
+	node1 = rte_rib6_insert(rib, &ip, depth);
 	RTE_TEST_ASSERT(node1 == NULL,
 		"Call succeeded with invalid parameters\n");
 
@@ -162,9 +162,8 @@ test_get_fn(void)
 	struct rte_rib6_node *node;
 	struct rte_rib6_conf config;
 	void *ext;
-	uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE] = {192, 0, 2, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0};
-	uint8_t ip_ret[RTE_RIB6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr ip = RTE_IPV6(0xc000, 0x0200, 0, 0, 0, 0, 0, 0);
+	struct rte_ipv6_addr ip_ret;
 	uint64_t nh_set = 10;
 	uint64_t nh_ret;
 	uint8_t depth = 24;
@@ -177,11 +176,11 @@ test_get_fn(void)
 	rib = rte_rib6_create(__func__, SOCKET_ID_ANY, &config);
 	RTE_TEST_ASSERT(rib != NULL, "Failed to create RIB\n");
 
-	node = rte_rib6_insert(rib, ip, depth);
+	node = rte_rib6_insert(rib, &ip, depth);
 	RTE_TEST_ASSERT(node != NULL, "Failed to insert rule\n");
 
 	/* test rte_rib6_get_ip() with incorrect args */
-	ret = rte_rib6_get_ip(NULL, ip_ret);
+	ret = rte_rib6_get_ip(NULL, &ip_ret);
 	RTE_TEST_ASSERT(ret < 0,
 		"Call succeeded with invalid parameters\n");
 	ret = rte_rib6_get_ip(node, NULL);
@@ -215,8 +214,8 @@ test_get_fn(void)
 		"Call succeeded with invalid parameters\n");
 
 	/* check the return values */
-	ret = rte_rib6_get_ip(node, ip_ret);
-	RTE_TEST_ASSERT((ret == 0) && (rte_rib6_is_equal(ip_ret, ip)),
+	ret = rte_rib6_get_ip(node, &ip_ret);
+	RTE_TEST_ASSERT((ret == 0) && (rte_ipv6_addr_eq(&ip_ret, &ip)),
 		"Failed to get proper node ip\n");
 	ret = rte_rib6_get_depth(node, &depth_ret);
 	RTE_TEST_ASSERT((ret == 0) && (depth_ret == depth),
@@ -243,8 +242,7 @@ test_basic(void)
 	struct rte_rib6_node *node;
 	struct rte_rib6_conf config;
 
-	uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE] = {192, 0, 2, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0};
+	struct rte_ipv6_addr ip = RTE_IPV6(0xc000, 0x0200, 0, 0, 0, 0, 0, 0);
 	uint64_t next_hop_add = 10;
 	uint64_t next_hop_return;
 	uint8_t depth = 24;
@@ -256,21 +254,21 @@ test_basic(void)
 	rib = rte_rib6_create(__func__, SOCKET_ID_ANY, &config);
 	RTE_TEST_ASSERT(rib != NULL, "Failed to create RIB\n");
 
-	node = rte_rib6_insert(rib, ip, depth);
+	node = rte_rib6_insert(rib, &ip, depth);
 	RTE_TEST_ASSERT(node != NULL, "Failed to insert rule\n");
 
 	status = rte_rib6_set_nh(node, next_hop_add);
 	RTE_TEST_ASSERT(status == 0,
 		"Failed to set rte_rib_node field\n");
 
-	node = rte_rib6_lookup(rib, ip);
+	node = rte_rib6_lookup(rib, &ip);
 	RTE_TEST_ASSERT(node != NULL, "Failed to lookup\n");
 
 	status = rte_rib6_get_nh(node, &next_hop_return);
 	RTE_TEST_ASSERT((status == 0) && (next_hop_add == next_hop_return),
 		"Failed to get proper nexthop\n");
 
-	node = rte_rib6_lookup_exact(rib, ip, depth);
+	node = rte_rib6_lookup_exact(rib, &ip, depth);
 	RTE_TEST_ASSERT(node != NULL,
 		"Failed to lookup\n");
 
@@ -278,12 +276,12 @@ test_basic(void)
 	RTE_TEST_ASSERT((status == 0) && (next_hop_add == next_hop_return),
 		"Failed to get proper nexthop\n");
 
-	rte_rib6_remove(rib, ip, depth);
+	rte_rib6_remove(rib, &ip, depth);
 
-	node = rte_rib6_lookup(rib, ip);
+	node = rte_rib6_lookup(rib, &ip);
 	RTE_TEST_ASSERT(node == NULL,
 		"Lookup returns non existent rule\n");
-	node = rte_rib6_lookup_exact(rib, ip, depth);
+	node = rte_rib6_lookup_exact(rib, &ip, depth);
 	RTE_TEST_ASSERT(node == NULL,
 		"Lookup returns non existent rule\n");
 
@@ -299,12 +297,9 @@ test_tree_traversal(void)
 	struct rte_rib6_node *node;
 	struct rte_rib6_conf config;
 
-	uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE] = {10, 0, 2, 130, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0};
-	uint8_t ip1[RTE_RIB6_IPV6_ADDR_SIZE] = {10, 0, 2, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0};
-	uint8_t ip2[RTE_RIB6_IPV6_ADDR_SIZE] = {10, 0, 2, 130, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 80};
+	struct rte_ipv6_addr ip = RTE_IPV6(0x0a00, 0x0282, 0, 0, 0, 0, 0, 0);
+	struct rte_ipv6_addr ip1 = RTE_IPV6(0x0a00, 0x0200, 0, 0, 0, 0, 0, 0);
+	struct rte_ipv6_addr ip2 = RTE_IPV6(0x0a00, 0x0282, 0, 0, 0, 0, 0, 0x0050);
 	uint8_t depth = 126;
 
 	config.max_nodes = MAX_RULES;
@@ -313,13 +308,13 @@ test_tree_traversal(void)
 	rib = rte_rib6_create(__func__, SOCKET_ID_ANY, &config);
 	RTE_TEST_ASSERT(rib != NULL, "Failed to create RIB\n");
 
-	node = rte_rib6_insert(rib, ip1, depth);
+	node = rte_rib6_insert(rib, &ip1, depth);
 	RTE_TEST_ASSERT(node != NULL, "Failed to insert rule\n");
-	node = rte_rib6_insert(rib, ip2, depth);
+	node = rte_rib6_insert(rib, &ip2, depth);
 	RTE_TEST_ASSERT(node != NULL, "Failed to insert rule\n");
 
 	node = NULL;
-	node = rte_rib6_get_nxt(rib, ip, 32, node, RTE_RIB6_GET_NXT_ALL);
+	node = rte_rib6_get_nxt(rib, &ip, 32, node, RTE_RIB6_GET_NXT_ALL);
 	RTE_TEST_ASSERT(node != NULL, "Failed to get rib_node\n");
 
 	rte_rib6_free(rib);
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 582d54aece2f..735542d7a1e2 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -80,13 +80,6 @@ Deprecation Notices
     - ``rte_node_ip6_route_add()``
   pipeline
     - ``struct rte_table_action_ipv6_header``
-  rib
-    - ``rte_rib6_lookup()``
-    - ``rte_rib6_lookup_exact()``
-    - ``rte_rib6_get_nxt()``
-    - ``rte_rib6_insert()``
-    - ``rte_rib6_remove()``
-    - ``rte_rib6_get_ip()``
 
 * net, ethdev: The flow item ``RTE_FLOW_ITEM_TYPE_VXLAN_GPE``
   is replaced with ``RTE_FLOW_ITEM_TYPE_VXLAN``.
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 2efa3410edc6..9fa63eb59a17 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -309,6 +309,17 @@ API Changes
   table
     - ``struct rte_table_lpm_ipv6_key``
     - ``RTE_LPM_IPV6_ADDR_SIZE`` (deprecated, replaced with ``RTE_IPV6_ADDR_SIZE``)
+  rib
+    - ``rte_rib6_get_ip()``
+    - ``rte_rib6_get_nxt()``
+    - ``rte_rib6_insert()``
+    - ``rte_rib6_lookup()``
+    - ``rte_rib6_lookup_exact()``
+    - ``rte_rib6_remove()``
+    - ``RTE_RIB6_IPV6_ADDR_SIZE`` (deprecated, replaced with ``RTE_IPV6_ADDR_SIZE``)
+    - ``get_msk_part()`` (deprecated)
+    - ``rte_rib6_copy_addr()`` (deprecated, replaced with direct structure assignments)
+    - ``rte_rib6_is_equal()`` (deprecated, replaced with ``rte_ipv6_addr_eq()``)
 
 
 ABI Changes
diff --git a/lib/fib/rte_fib6.c b/lib/fib/rte_fib6.c
index ef334da67cc4..9c7d4fde433a 100644
--- a/lib/fib/rte_fib6.c
+++ b/lib/fib/rte_fib6.c
@@ -58,7 +58,7 @@ dummy_lookup(void *fib_p, const struct rte_ipv6_addr *ips,
 	struct rte_rib6_node *node;
 
 	for (i = 0; i < n; i++) {
-		node = rte_rib6_lookup(fib->rib, ips[i].a);
+		node = rte_rib6_lookup(fib->rib, &ips[i]);
 		if (node != NULL)
 			rte_rib6_get_nh(node, &next_hops[i]);
 		else
@@ -74,19 +74,19 @@ dummy_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 	if ((fib == NULL) || (depth > RTE_IPV6_MAX_DEPTH))
 		return -EINVAL;
 
-	node = rte_rib6_lookup_exact(fib->rib, ip->a, depth);
+	node = rte_rib6_lookup_exact(fib->rib, ip, depth);
 
 	switch (op) {
 	case RTE_FIB6_ADD:
 		if (node == NULL)
-			node = rte_rib6_insert(fib->rib, ip->a, depth);
+			node = rte_rib6_insert(fib->rib, ip, depth);
 		if (node == NULL)
 			return -rte_errno;
 		return rte_rib6_set_nh(node, next_hop);
 	case RTE_FIB6_DEL:
 		if (node == NULL)
 			return -ENOENT;
-		rte_rib6_remove(fib->rib, ip->a, depth);
+		rte_rib6_remove(fib->rib, ip, depth);
 		return 0;
 	}
 	return -EINVAL;
diff --git a/lib/fib/trie.c b/lib/fib/trie.c
index 6bb46541feee..4893f6c63615 100644
--- a/lib/fib/trie.c
+++ b/lib/fib/trie.c
@@ -468,13 +468,13 @@ modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib,
 
 	ledge = *ip;
 	do {
-		tmp = rte_rib6_get_nxt(rib, ip->a, depth, tmp,
+		tmp = rte_rib6_get_nxt(rib, ip, depth, tmp,
 			RTE_RIB6_GET_NXT_COVER);
 		if (tmp != NULL) {
 			rte_rib6_get_depth(tmp, &tmp_depth);
 			if (tmp_depth == depth)
 				continue;
-			rte_rib6_get_ip(tmp, redge.a);
+			rte_rib6_get_ip(tmp, &redge);
 			if (rte_ipv6_addr_eq(&ledge, &redge)) {
 				get_nxt_net(&ledge, tmp_depth);
 				continue;
@@ -532,11 +532,11 @@ trie_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 	rte_ipv6_addr_mask(&ip_masked, depth);
 
 	if (depth > 24) {
-		tmp = rte_rib6_get_nxt(rib, ip_masked.a,
+		tmp = rte_rib6_get_nxt(rib, &ip_masked,
 			RTE_ALIGN_FLOOR(depth, 8), NULL,
 			RTE_RIB6_GET_NXT_COVER);
 		if (tmp == NULL) {
-			tmp = rte_rib6_lookup(rib, ip->a);
+			tmp = rte_rib6_lookup(rib, ip);
 			if (tmp != NULL) {
 				rte_rib6_get_depth(tmp, &tmp_depth);
 				parent_depth = RTE_MAX(tmp_depth, 24);
@@ -546,7 +546,7 @@ trie_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 			depth_diff = depth_diff >> 3;
 		}
 	}
-	node = rte_rib6_lookup_exact(rib, ip_masked.a, depth);
+	node = rte_rib6_lookup_exact(rib, &ip_masked, depth);
 	switch (op) {
 	case RTE_FIB6_ADD:
 		if (node != NULL) {
@@ -563,7 +563,7 @@ trie_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 				dp->number_tbl8s - depth_diff))
 			return -ENOSPC;
 
-		node = rte_rib6_insert(rib, ip_masked.a, depth);
+		node = rte_rib6_insert(rib, &ip_masked, depth);
 		if (node == NULL)
 			return -rte_errno;
 		rte_rib6_set_nh(node, next_hop);
@@ -575,7 +575,7 @@ trie_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 		}
 		ret = modify_dp(dp, rib, &ip_masked, depth, next_hop);
 		if (ret != 0) {
-			rte_rib6_remove(rib, ip_masked.a, depth);
+			rte_rib6_remove(rib, &ip_masked, depth);
 			return ret;
 		}
 
@@ -597,7 +597,7 @@ trie_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
 
 		if (ret != 0)
 			return ret;
-		rte_rib6_remove(rib, ip->a, depth);
+		rte_rib6_remove(rib, ip, depth);
 
 		dp->rsvd_tbl8s -= depth_diff;
 		return 0;
diff --git a/lib/rib/meson.build b/lib/rib/meson.build
index 7bacbb453592..e98f70848189 100644
--- a/lib/rib/meson.build
+++ b/lib/rib/meson.build
@@ -4,4 +4,4 @@
 
 sources = files('rte_rib.c', 'rte_rib6.c')
 headers = files('rte_rib.h', 'rte_rib6.h')
-deps += ['mempool']
+deps += ['net', 'mempool']
diff --git a/lib/rib/rte_rib6.c b/lib/rib/rte_rib6.c
index 89c8390c63be..84c47fed6d1f 100644
--- a/lib/rib/rte_rib6.c
+++ b/lib/rib/rte_rib6.c
@@ -20,7 +20,6 @@
 #include "rib_log.h"
 
 #define RTE_RIB_VALID_NODE	1
-#define RIB6_MAXDEPTH		128
 /* Maximum length of a RIB6 name. */
 #define RTE_RIB6_NAMESIZE	64
 
@@ -35,7 +34,7 @@ struct rte_rib6_node {
 	struct rte_rib6_node	*right;
 	struct rte_rib6_node	*parent;
 	uint64_t		nh;
-	uint8_t			ip[RTE_RIB6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr	ip;
 	uint8_t			depth;
 	uint8_t			flag;
 	uint64_t ext[];
@@ -62,24 +61,8 @@ is_right_node(const struct rte_rib6_node *node)
 	return node->parent->right == node;
 }
 
-/*
- * Check if ip1 is covered by ip2/depth prefix
- */
-static inline bool
-is_covered(const uint8_t ip1[RTE_RIB6_IPV6_ADDR_SIZE],
-		const uint8_t ip2[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
-{
-	int i;
-
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
-		if ((ip1[i] ^ ip2[i]) & get_msk_part(depth, i))
-			return false;
-
-	return true;
-}
-
 static inline int
-get_dir(const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+get_dir(const struct rte_ipv6_addr *ip, uint8_t depth)
 {
 	uint8_t index, msk;
 
@@ -98,14 +81,14 @@ get_dir(const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
 	 */
 	msk = 1 << (7 - (depth & 7));
 
-	return (ip[index] & msk) != 0;
+	return (ip->a[index] & msk) != 0;
 }
 
 static inline struct rte_rib6_node *
 get_nxt_node(struct rte_rib6_node *node,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
+	const struct rte_ipv6_addr *ip)
 {
-	if (node->depth == RIB6_MAXDEPTH)
+	if (node->depth == RTE_IPV6_MAX_DEPTH)
 		return NULL;
 
 	return (get_dir(ip, node->depth)) ? node->right : node->left;
@@ -133,7 +116,7 @@ node_free(struct rte_rib6 *rib, struct rte_rib6_node *ent)
 
 struct rte_rib6_node *
 rte_rib6_lookup(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
+	const struct rte_ipv6_addr *ip)
 {
 	struct rte_rib6_node *cur;
 	struct rte_rib6_node *prev = NULL;
@@ -144,7 +127,7 @@ rte_rib6_lookup(struct rte_rib6 *rib,
 	}
 	cur = rib->tree;
 
-	while ((cur != NULL) && is_covered(ip, cur->ip, cur->depth)) {
+	while ((cur != NULL) && rte_ipv6_addr_eq_prefix(ip, &cur->ip, cur->depth)) {
 		if (is_valid_node(cur))
 			prev = cur;
 		cur = get_nxt_node(cur, ip);
@@ -169,32 +152,31 @@ rte_rib6_lookup_parent(struct rte_rib6_node *ent)
 
 struct rte_rib6_node *
 rte_rib6_lookup_exact(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+	const struct rte_ipv6_addr *ip, uint8_t depth)
 {
 	struct rte_rib6_node *cur;
-	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];
-	int i;
+	struct rte_ipv6_addr tmp_ip;
 
-	if (unlikely(rib == NULL || ip == NULL || depth > RIB6_MAXDEPTH)) {
+	if (unlikely(rib == NULL || ip == NULL || depth > RTE_IPV6_MAX_DEPTH)) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
 	cur = rib->tree;
 
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
-		tmp_ip[i] = ip[i] & get_msk_part(depth, i);
+	tmp_ip = *ip;
+	rte_ipv6_addr_mask(&tmp_ip, depth);
 
 	while (cur != NULL) {
-		if (rte_rib6_is_equal(cur->ip, tmp_ip) &&
+		if (rte_ipv6_addr_eq(&cur->ip, &tmp_ip) &&
 				(cur->depth == depth) &&
 				is_valid_node(cur))
 			return cur;
 
-		if (!(is_covered(tmp_ip, cur->ip, cur->depth)) ||
+		if (!rte_ipv6_addr_eq_prefix(&tmp_ip, &cur->ip, cur->depth) ||
 				(cur->depth >= depth))
 			break;
 
-		cur = get_nxt_node(cur, tmp_ip);
+		cur = get_nxt_node(cur, &tmp_ip);
 	}
 
 	return NULL;
@@ -207,32 +189,31 @@ rte_rib6_lookup_exact(struct rte_rib6 *rib,
  */
 struct rte_rib6_node *
 rte_rib6_get_nxt(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE],
+	const struct rte_ipv6_addr *ip,
 	uint8_t depth, struct rte_rib6_node *last, int flag)
 {
 	struct rte_rib6_node *tmp, *prev = NULL;
-	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];
-	int i;
+	struct rte_ipv6_addr tmp_ip;
 
-	if (unlikely(rib == NULL || ip == NULL || depth > RIB6_MAXDEPTH)) {
+	if (unlikely(rib == NULL || ip == NULL || depth > RTE_IPV6_MAX_DEPTH)) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
 
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
-		tmp_ip[i] = ip[i] & get_msk_part(depth, i);
+	tmp_ip = *ip;
+	rte_ipv6_addr_mask(&tmp_ip, depth);
 
 	if (last == NULL) {
 		tmp = rib->tree;
 		while ((tmp) && (tmp->depth < depth))
-			tmp = get_nxt_node(tmp, tmp_ip);
+			tmp = get_nxt_node(tmp, &tmp_ip);
 	} else {
 		tmp = last;
 		while ((tmp->parent != NULL) && (is_right_node(tmp) ||
 				(tmp->parent->right == NULL))) {
 			tmp = tmp->parent;
 			if (is_valid_node(tmp) &&
-					(is_covered(tmp->ip, tmp_ip, depth) &&
+					(rte_ipv6_addr_eq_prefix(&tmp->ip, &tmp_ip, depth) &&
 					(tmp->depth > depth)))
 				return tmp;
 		}
@@ -240,7 +221,7 @@ rte_rib6_get_nxt(struct rte_rib6 *rib,
 	}
 	while (tmp) {
 		if (is_valid_node(tmp) &&
-				(is_covered(tmp->ip, tmp_ip, depth) &&
+				(rte_ipv6_addr_eq_prefix(&tmp->ip, &tmp_ip, depth) &&
 				(tmp->depth > depth))) {
 			prev = tmp;
 			if (flag == RTE_RIB6_GET_NXT_COVER)
@@ -253,7 +234,7 @@ rte_rib6_get_nxt(struct rte_rib6 *rib,
 
 void
 rte_rib6_remove(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+	const struct rte_ipv6_addr *ip, uint8_t depth)
 {
 	struct rte_rib6_node *cur, *prev, *child;
 
@@ -286,28 +267,28 @@ rte_rib6_remove(struct rte_rib6 *rib,
 
 struct rte_rib6_node *
 rte_rib6_insert(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+	const struct rte_ipv6_addr *ip, uint8_t depth)
 {
 	struct rte_rib6_node **tmp;
 	struct rte_rib6_node *prev = NULL;
 	struct rte_rib6_node *new_node = NULL;
 	struct rte_rib6_node *common_node = NULL;
-	uint8_t common_prefix[RTE_RIB6_IPV6_ADDR_SIZE];
-	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr common_prefix;
+	struct rte_ipv6_addr tmp_ip;
 	int i, d;
 	uint8_t common_depth, ip_xor;
 
-	if (unlikely((rib == NULL || ip == NULL || depth > RIB6_MAXDEPTH))) {
+	if (unlikely((rib == NULL || ip == NULL || depth > RTE_IPV6_MAX_DEPTH))) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
 
 	tmp = &rib->tree;
 
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
-		tmp_ip[i] = ip[i] & get_msk_part(depth, i);
+	tmp_ip = *ip;
+	rte_ipv6_addr_mask(&tmp_ip, depth);
 
-	new_node = rte_rib6_lookup_exact(rib, tmp_ip, depth);
+	new_node = rte_rib6_lookup_exact(rib, &tmp_ip, depth);
 	if (new_node != NULL) {
 		rte_errno = EEXIST;
 		return NULL;
@@ -321,7 +302,7 @@ rte_rib6_insert(struct rte_rib6 *rib,
 	new_node->left = NULL;
 	new_node->right = NULL;
 	new_node->parent = NULL;
-	rte_rib6_copy_addr(new_node->ip, tmp_ip);
+	new_node->ip = tmp_ip;
 	new_node->depth = depth;
 	new_node->flag = RTE_RIB_VALID_NODE;
 
@@ -340,28 +321,27 @@ rte_rib6_insert(struct rte_rib6 *rib,
 		 * but node with proper search criteria is found.
 		 * Validate intermediate node and return.
 		 */
-		if (rte_rib6_is_equal(tmp_ip, (*tmp)->ip) &&
-				(depth == (*tmp)->depth)) {
+		if (rte_ipv6_addr_eq(&tmp_ip, &(*tmp)->ip) && (depth == (*tmp)->depth)) {
 			node_free(rib, new_node);
 			(*tmp)->flag |= RTE_RIB_VALID_NODE;
 			++rib->cur_routes;
 			return *tmp;
 		}
 
-		if (!is_covered(tmp_ip, (*tmp)->ip, (*tmp)->depth) ||
+		if (!rte_ipv6_addr_eq_prefix(&tmp_ip, &(*tmp)->ip, (*tmp)->depth) ||
 				((*tmp)->depth >= depth)) {
 			break;
 		}
 		prev = *tmp;
 
-		tmp = (get_dir(tmp_ip, (*tmp)->depth)) ? &(*tmp)->right :
+		tmp = (get_dir(&tmp_ip, (*tmp)->depth)) ? &(*tmp)->right :
 				&(*tmp)->left;
 	}
 
 	/* closest node found, new_node should be inserted in the middle */
 	common_depth = RTE_MIN(depth, (*tmp)->depth);
-	for (i = 0, d = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++) {
-		ip_xor = tmp_ip[i] ^ (*tmp)->ip[i];
+	for (i = 0, d = 0; i < RTE_IPV6_ADDR_SIZE; i++) {
+		ip_xor = tmp_ip.a[i] ^ (*tmp)->ip.a[i];
 		if (ip_xor == 0)
 			d += 8;
 		else {
@@ -372,13 +352,13 @@ rte_rib6_insert(struct rte_rib6 *rib,
 
 	common_depth = RTE_MIN(d, common_depth);
 
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
-		common_prefix[i] = tmp_ip[i] & get_msk_part(common_depth, i);
+	common_prefix = tmp_ip;
+	rte_ipv6_addr_mask(&common_prefix, common_depth);
 
-	if (rte_rib6_is_equal(common_prefix, tmp_ip) &&
+	if (rte_ipv6_addr_eq(&common_prefix, &tmp_ip) &&
 			(common_depth == depth)) {
 		/* insert as a parent */
-		if (get_dir((*tmp)->ip, depth))
+		if (get_dir(&(*tmp)->ip, depth))
 			new_node->right = *tmp;
 		else
 			new_node->left = *tmp;
@@ -393,13 +373,13 @@ rte_rib6_insert(struct rte_rib6 *rib,
 			rte_errno = ENOMEM;
 			return NULL;
 		}
-		rte_rib6_copy_addr(common_node->ip, common_prefix);
+		common_node->ip = common_prefix;
 		common_node->depth = common_depth;
 		common_node->flag = 0;
 		common_node->parent = (*tmp)->parent;
 		new_node->parent = common_node;
 		(*tmp)->parent = common_node;
-		if (get_dir((*tmp)->ip, common_depth) == 1) {
+		if (get_dir(&(*tmp)->ip, common_depth) == 1) {
 			common_node->left = new_node;
 			common_node->right = *tmp;
 		} else {
@@ -414,13 +394,13 @@ rte_rib6_insert(struct rte_rib6 *rib,
 
 int
 rte_rib6_get_ip(const struct rte_rib6_node *node,
-		uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
+		struct rte_ipv6_addr *ip)
 {
 	if (unlikely(node == NULL || ip == NULL)) {
 		rte_errno = EINVAL;
 		return -1;
 	}
-	rte_rib6_copy_addr(ip, node->ip);
+	*ip = node->ip;
 	return 0;
 }
 
@@ -604,7 +584,7 @@ rte_rib6_free(struct rte_rib6 *rib)
 
 	while ((tmp = rte_rib6_get_nxt(rib, 0, 0, tmp,
 			RTE_RIB6_GET_NXT_ALL)) != NULL)
-		rte_rib6_remove(rib, tmp->ip, tmp->depth);
+		rte_rib6_remove(rib, &tmp->ip, tmp->depth);
 
 	rte_mempool_free(rib->node_pool);
 
diff --git a/lib/rib/rte_rib6.h b/lib/rib/rte_rib6.h
index 775286f965f2..a60756f798d8 100644
--- a/lib/rib/rte_rib6.h
+++ b/lib/rib/rte_rib6.h
@@ -16,12 +16,13 @@
 
 #include <rte_memcpy.h>
 #include <rte_common.h>
+#include <rte_ip6.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#define RTE_RIB6_IPV6_ADDR_SIZE	16
+#define RTE_RIB6_IPV6_ADDR_SIZE (RTE_DEPRECATED(RTE_RIB6_IPV6_ADDR_SIZE) RTE_IPV6_ADDR_SIZE)
 
 /**
  * rte_rib6_get_nxt() flags
@@ -56,12 +57,15 @@ struct rte_rib6_conf {
  * @param src
  *  pointer from where to copy
  */
+static inline void rte_rib6_copy_addr(uint8_t *dst, const uint8_t *src)
+	__rte_deprecated_msg("use direct struct assignment");
+
 static inline void
 rte_rib6_copy_addr(uint8_t *dst, const uint8_t *src)
 {
 	if ((dst == NULL) || (src == NULL))
 		return;
-	rte_memcpy(dst, src, RTE_RIB6_IPV6_ADDR_SIZE);
+	rte_memcpy(dst, src, RTE_IPV6_ADDR_SIZE);
 }
 
 /**
@@ -76,13 +80,16 @@ rte_rib6_copy_addr(uint8_t *dst, const uint8_t *src)
  *  1 if equal
  *  0 otherwise
  */
+static inline int rte_rib6_is_equal(const uint8_t *ip1, const uint8_t *ip2)
+	__rte_deprecated_msg("use rte_ipv6_addr_eq");
+
 static inline int
 rte_rib6_is_equal(const uint8_t *ip1, const uint8_t *ip2) {
 	int i;
 
 	if ((ip1 == NULL) || (ip2 == NULL))
 		return 0;
-	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++) {
+	for (i = 0; i < RTE_IPV6_ADDR_SIZE; i++) {
 		if (ip1[i] != ip2[i])
 			return 0;
 	}
@@ -100,6 +107,8 @@ rte_rib6_is_equal(const uint8_t *ip1, const uint8_t *ip2) {
  * @return
  *  8-bit chunk of the 128-bit IPv6 mask
  */
+static inline uint8_t get_msk_part(uint8_t depth, int byte) __rte_deprecated;
+
 static inline uint8_t
 get_msk_part(uint8_t depth, int byte) {
 	uint8_t part;
@@ -124,7 +133,7 @@ get_msk_part(uint8_t depth, int byte) {
  */
 struct rte_rib6_node *
 rte_rib6_lookup(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE]);
+	const struct rte_ipv6_addr *ip);
 
 /**
  * Lookup less specific route into the RIB structure
@@ -154,7 +163,7 @@ rte_rib6_lookup_parent(struct rte_rib6_node *ent);
  */
 struct rte_rib6_node *
 rte_rib6_lookup_exact(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth);
+	const struct rte_ipv6_addr *ip, uint8_t depth);
 
 /**
  * Retrieve next more specific prefix from the RIB
@@ -181,7 +190,7 @@ rte_rib6_lookup_exact(struct rte_rib6 *rib,
  */
 struct rte_rib6_node *
 rte_rib6_get_nxt(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE],
+	const struct rte_ipv6_addr *ip,
 	uint8_t depth, struct rte_rib6_node *last, int flag);
 
 /**
@@ -196,7 +205,7 @@ rte_rib6_get_nxt(struct rte_rib6 *rib,
  */
 void
 rte_rib6_remove(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth);
+	const struct rte_ipv6_addr *ip, uint8_t depth);
 
 /**
  * Insert prefix into the RIB
@@ -213,7 +222,7 @@ rte_rib6_remove(struct rte_rib6 *rib,
  */
 struct rte_rib6_node *
 rte_rib6_insert(struct rte_rib6 *rib,
-	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth);
+	const struct rte_ipv6_addr *ip, uint8_t depth);
 
 /**
  * Get an ip from rte_rib6_node
@@ -228,7 +237,7 @@ rte_rib6_insert(struct rte_rib6 *rib,
  */
 int
 rte_rib6_get_ip(const struct rte_rib6_node *node,
-		uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE]);
+		struct rte_ipv6_addr *ip);
 
 /**
  * Get a depth from rte_rib6_node
-- 
2.47.0


^ permalink raw reply	[relevance 2%]

* [PATCH dpdk v5 05/17] lpm6: use IPv6 address structure and utils
    2024-10-18 14:05  1%   ` [PATCH dpdk v5 04/17] net: use IPv6 structure for packet headers Robin Jarry
@ 2024-10-18 14:05  1%   ` Robin Jarry
  2024-10-18 14:05  2%   ` [PATCH dpdk v5 07/17] rib6: " Robin Jarry
  2 siblings, 0 replies; 169+ results
From: Robin Jarry @ 2024-10-18 14:05 UTC (permalink / raw)
  To: dev, Vladimir Medvedkin, Cristian Dumitrescu, Bruce Richardson,
	Konstantin Ananyev, Wathsala Vithanage, Radu Nicolau,
	Akhil Goyal, Jerin Jacob, Kiran Kumar K, Nithin Dabilpuram,
	Zhirun Yan, Pavan Nikhilesh

Replace ad-hoc uint8_t[16] array types in the API of rte_lpm6 with
rte_ipv6_addr structures. Replace duplicate functions and macros with
common ones from rte_ip6.h. Update all code accordingly.
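
As a minimal illustration (not part of this patch; the lpm handle and
the route values are hypothetical), callers now pass rte_ipv6_addr
pointers instead of raw byte arrays:

    struct rte_ipv6_addr dst = RTE_IPV6(0x2001, 0x0db8, 0, 0, 0, 0, 0, 0x0001);
    uint32_t nh = 0;

    /* depth 64 and next hop 100 are arbitrary example values */
    if (rte_lpm6_add(lpm, &dst, 64, 100) == 0 &&
            rte_lpm6_lookup(lpm, &dst, &nh) == 0)
        printf("next hop: %u\n", nh);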

NB: the conversion between 16-byte arrays and RTE_IPV6() literals was
done automatically with the following Python script and adjusted
manually afterwards:

import argparse
import re
import struct

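# Match a brace-enclosed initializer of exactly 16 byte values
# (decimal or hexadecimal), i.e. an ad-hoc uint8_t[16] IPv6 address.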
ip = re.compile(
    r"""
    \{
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*,
    [\s\t\r\n]*([\da-fA-Fx]+)[\s\t\r\n]*
    \}
    """,
    re.VERBOSE,
)

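# Repack the 16 matched bytes as 8 big-endian 16-bit words and render
# them as an RTE_IPV6() literal, emitting a bare 0 for zero words.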
def repl(match):
    u8 = bytes(int(g, 0) for g in match.groups("0"))
    nums = []
    for u16 in struct.unpack("!HHHHHHHH", u8):
        if u16:
            nums.append(f"0x{u16:04x}")
        else:
            nums.append("0")
    return f"RTE_IPV6({', '.join(nums)})"

p = argparse.ArgumentParser()
p.add_argument("args", nargs="+")
args = p.parse_args()

for a in args.args:
    with open(a) as f:
        buf = f.read()

    buf = ip.sub(repl, buf)
    with open(a, "w") as f:
        f.write(buf)
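
(For reference, the script would be invoked on the files to convert,
e.g. "python3 convert.py app/test/test_lpm6_data.h"; the "convert.py"
file name is illustrative and not part of the patch.)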

Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
 app/test-fib/main.c                    |   74 +-
 app/test-pipeline/pipeline_lpm_ipv6.c  |   11 +-
 app/test/test_fib6_perf.c              |    6 +-
 app/test/test_lpm6.c                   |  490 +++---
 app/test/test_lpm6_data.h              | 2025 ++++++++++++------------
 app/test/test_lpm6_perf.c              |   10 +-
 app/test/test_table_combined.c         |    2 +-
 app/test/test_table_tables.c           |    8 +-
 doc/guides/rel_notes/deprecation.rst   |    9 -
 doc/guides/rel_notes/release_24_11.rst |   12 +
 examples/ip_fragmentation/main.c       |   23 +-
 examples/ip_pipeline/thread.c          |    2 +-
 examples/ip_reassembly/main.c          |   23 +-
 examples/ipsec-secgw/ipsec_lpm_neon.h  |    7 +-
 examples/ipsec-secgw/ipsec_worker.c    |   11 +-
 examples/ipsec-secgw/ipsec_worker.h    |    4 +-
 examples/ipsec-secgw/rt.c              |   22 +-
 examples/l3fwd-graph/main.c            |    4 +-
 examples/l3fwd/l3fwd_fib.c             |    4 +-
 examples/l3fwd/l3fwd_lpm.c             |    8 +-
 examples/l3fwd/l3fwd_route.h           |    9 +-
 examples/l3fwd/lpm_route_parse.c       |    9 +-
 examples/l3fwd/main.c                  |   32 +-
 lib/lpm/meson.build                    |    1 +
 lib/lpm/rte_lpm6.c                     |  148 +-
 lib/lpm/rte_lpm6.h                     |   19 +-
 lib/node/ip6_lookup.c                  |   18 +-
 lib/table/rte_table_lpm_ipv6.c         |   12 +-
 lib/table/rte_table_lpm_ipv6.h         |    7 +-
 29 files changed, 1466 insertions(+), 1544 deletions(-)

diff --git a/app/test-fib/main.c b/app/test-fib/main.c
index c49bfe8bcec3..9f45d03d81fb 100644
--- a/app/test-fib/main.c
+++ b/app/test-fib/main.c
@@ -62,25 +62,6 @@ enum {
 	(unsigned)((unsigned char *)&addr)[2],	\
 	(unsigned)((unsigned char *)&addr)[1],	\
 	(unsigned)((unsigned char *)&addr)[0]
-
-#define NIPQUAD6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
-#define NIPQUAD6(addr)				\
-	((uint8_t *)addr)[0] << 8 |	\
-	((uint8_t *)addr)[1],		\
-	((uint8_t *)addr)[2] << 8 |	\
-	((uint8_t *)addr)[3],		\
-	((uint8_t *)addr)[4] << 8 |	\
-	((uint8_t *)addr)[5],		\
-	((uint8_t *)addr)[6] << 8 |	\
-	((uint8_t *)addr)[7],		\
-	((uint8_t *)addr)[8] << 8 |	\
-	((uint8_t *)addr)[9],		\
-	((uint8_t *)addr)[10] << 8 |	\
-	((uint8_t *)addr)[11],		\
-	((uint8_t *)addr)[12] << 8 |	\
-	((uint8_t *)addr)[13],		\
-	((uint8_t *)addr)[14] << 8 |	\
-	((uint8_t *)addr)[15]
 #endif
 
 static struct {
@@ -123,7 +104,7 @@ struct rt_rule_4 {
 };
 
 struct rt_rule_6 {
-	uint8_t		addr[16];
+	struct rte_ipv6_addr addr;
 	uint8_t		depth;
 	uint64_t	nh;
 };
@@ -306,15 +287,15 @@ shuffle_rt_6(struct rt_rule_6 *rt, int n)
 
 	for (i = 0; i < n; i++) {
 		j = rte_rand() % n;
-		memcpy(tmp.addr, rt[i].addr, 16);
+		tmp.addr = rt[i].addr;
 		tmp.depth = rt[i].depth;
 		tmp.nh = rt[i].nh;
 
-		memcpy(rt[i].addr, rt[j].addr, 16);
+		rt[i].addr = rt[j].addr;
 		rt[i].depth = rt[j].depth;
 		rt[i].nh = rt[j].nh;
 
-		memcpy(rt[j].addr, tmp.addr, 16);
+		rt[j].addr = tmp.addr;
 		rt[j].depth = tmp.depth;
 		rt[j].nh = tmp.nh;
 	}
@@ -364,7 +345,7 @@ gen_random_rt_6(struct rt_rule_6 *rt, int nh_sz)
 	uint32_t a, i, j, k = 0;
 
 	if (config.nb_routes_per_depth[0] != 0) {
-		memset(rt[k].addr, 0, 16);
+		memset(&rt[k].addr, 0, 16);
 		rt[k].depth = 0;
 		rt[k++].nh = rte_rand() & get_max_nh(nh_sz);
 	}
@@ -380,7 +361,7 @@ gen_random_rt_6(struct rt_rule_6 *rt, int nh_sz)
 				uint64_t rnd_val = get_rnd_rng((uint64_t)edge,
 					(uint64_t)(edge + step));
 				rnd = rte_cpu_to_be_32(rnd_val << (32 - i));
-				complete_v6_addr((uint32_t *)rt[k].addr,
+				complete_v6_addr((uint32_t *)&rt[k].addr,
 					rnd, a);
 				rt[k].depth = (a * 32) + i;
 				rt[k].nh = rte_rand() & get_max_nh(nh_sz);
@@ -390,19 +371,19 @@ gen_random_rt_6(struct rt_rule_6 *rt, int nh_sz)
 }
 
 static inline void
-set_rnd_ipv6(uint8_t *addr, uint8_t *route, int depth)
+set_rnd_ipv6(struct rte_ipv6_addr *addr, struct rte_ipv6_addr *route, int depth)
 {
 	int i;
 
 	for (i = 0; i < 16; i++)
-		addr[i] = rte_rand();
+		addr->a[i] = rte_rand();
 
 	for (i = 0; i < 16; i++) {
 		if (depth >= 8)
-			addr[i] = route[i];
+			addr->a[i] = route->a[i];
 		else if (depth > 0) {
-			addr[i] &= (uint16_t)UINT8_MAX >> depth;
-			addr[i] |= route[i] & UINT8_MAX << (8 - depth);
+			addr->a[i] &= (uint16_t)UINT8_MAX >> depth;
+			addr->a[i] |= route->a[i] & UINT8_MAX << (8 - depth);
 		} else
 			return;
 		depth -= 8;
@@ -413,7 +394,7 @@ static void
 gen_rnd_lookup_tbl(int af)
 {
 	uint32_t *tbl4 = config.lookup_tbl;
-	uint8_t *tbl6 = config.lookup_tbl;
+	struct rte_ipv6_addr *tbl6 = config.lookup_tbl;
 	struct rt_rule_4 *rt4 = (struct rt_rule_4 *)config.rt;
 	struct rt_rule_6 *rt6 = (struct rt_rule_6 *)config.rt;
 	uint32_t i, j;
@@ -432,11 +413,10 @@ gen_rnd_lookup_tbl(int af)
 		for (i = 0, j = 0; i < config.nb_lookup_ips;
 				i++, j = (j + 1) % config.nb_routes) {
 			if ((rte_rand() % 100) < config.rnd_lookup_ips_ratio) {
-				set_rnd_ipv6(&tbl6[i * 16], rt6[j].addr, 0);
+				set_rnd_ipv6(&tbl6[i], &rt6[j].addr, 0);
 				config.nb_lookup_ips_rnd++;
 			} else {
-				set_rnd_ipv6(&tbl6[i * 16], rt6[j].addr,
-					rt6[j].depth);
+				set_rnd_ipv6(&tbl6[i], &rt6[j].addr, rt6[j].depth);
 			}
 		}
 	}
@@ -522,7 +502,7 @@ parse_rt_6(FILE *f)
 			s = NULL;
 		}
 
-		ret = _inet_net_pton(AF_INET6, in[RT_PREFIX], rt[j].addr);
+		ret = _inet_net_pton(AF_INET6, in[RT_PREFIX], &rt[j].addr);
 		if (ret < 0)
 			return ret;
 
@@ -561,7 +541,7 @@ dump_lookup(int af)
 {
 	FILE *f;
 	uint32_t *tbl4 = config.lookup_tbl;
-	uint8_t *tbl6 = config.lookup_tbl;
+	struct rte_ipv6_addr *tbl6 = config.lookup_tbl;
 	uint32_t i;
 
 	f = fopen(config.lookup_ips_file_s, "w");
@@ -575,7 +555,7 @@ dump_lookup(int af)
 			fprintf(f, NIPQUAD_FMT"\n", NIPQUAD(tbl4[i]));
 	} else {
 		for (i = 0; i < config.nb_lookup_ips; i++)
-			fprintf(f, NIPQUAD6_FMT"\n", NIPQUAD6(&tbl6[i * 16]));
+			fprintf(f, RTE_IPV6_ADDR_FMT"\n", RTE_IPV6_ADDR_SPLIT(&tbl6[i]));
 	}
 	fclose(f);
 	return 0;
@@ -1023,7 +1003,7 @@ dump_rt_6(struct rt_rule_6 *rt)
 	}
 
 	for (i = 0; i < config.nb_routes; i++) {
-		fprintf(f, NIPQUAD6_FMT"/%d %"PRIu64"\n", NIPQUAD6(rt[i].addr),
+		fprintf(f, RTE_IPV6_ADDR_FMT"/%d %"PRIu64"\n", RTE_IPV6_ADDR_SPLIT(&rt[i].addr),
 			rt[i].depth, rt[i].nh);
 
 	}
@@ -1043,7 +1023,7 @@ run_v6(void)
 	int ret = 0;
 	struct rte_lpm6	*lpm = NULL;
 	struct rte_lpm6_config lpm_conf;
-	uint8_t *tbl6;
+	struct rte_ipv6_addr *tbl6;
 	uint64_t fib_nh[BURST_SZ];
 	int32_t lpm_nh[BURST_SZ];
 
@@ -1094,7 +1074,7 @@ run_v6(void)
 	for (k = config.print_fract, i = 0; k > 0; k--) {
 		start = rte_rdtsc_precise();
 		for (j = 0; j < (config.nb_routes - i) / k; j++) {
-			ret = rte_fib6_add(fib, rt[i + j].addr,
+			ret = rte_fib6_add(fib, rt[i + j].addr.a,
 				rt[i + j].depth, rt[i + j].nh);
 			if (unlikely(ret != 0)) {
 				printf("Can not add a route to FIB, err %d\n",
@@ -1120,7 +1100,7 @@ run_v6(void)
 		for (k = config.print_fract, i = 0; k > 0; k--) {
 			start = rte_rdtsc_precise();
 			for (j = 0; j < (config.nb_routes - i) / k; j++) {
-				ret = rte_lpm6_add(lpm, rt[i + j].addr,
+				ret = rte_lpm6_add(lpm, &rt[i + j].addr,
 					rt[i + j].depth, rt[i + j].nh);
 				if (ret != 0) {
 					if (rt[i + j].depth == 0)
@@ -1139,7 +1119,7 @@ run_v6(void)
 	acc = 0;
 	for (i = 0; i < config.nb_lookup_ips; i += BURST_SZ) {
 		start = rte_rdtsc_precise();
-		ret = rte_fib6_lookup_bulk(fib, (uint8_t (*)[16])(tbl6 + i*16),
+		ret = rte_fib6_lookup_bulk(fib, &tbl6[i].a,
 			fib_nh, BURST_SZ);
 		acc += rte_rdtsc_precise() - start;
 		if (ret != 0) {
@@ -1154,7 +1134,7 @@ run_v6(void)
 		for (i = 0; i < config.nb_lookup_ips; i += BURST_SZ) {
 			start = rte_rdtsc_precise();
 			ret = rte_lpm6_lookup_bulk_func(lpm,
-				(uint8_t (*)[16])(tbl6 + i*16),
+				&tbl6[i],
 				lpm_nh, BURST_SZ);
 			acc += rte_rdtsc_precise() - start;
 			if (ret != 0) {
@@ -1166,10 +1146,10 @@ run_v6(void)
 
 		for (i = 0; i < config.nb_lookup_ips; i += BURST_SZ) {
 			rte_fib6_lookup_bulk(fib,
-				(uint8_t (*)[16])(tbl6 + i*16),
+				&tbl6[i].a,
 				fib_nh, BURST_SZ);
 			rte_lpm6_lookup_bulk_func(lpm,
-				(uint8_t (*)[16])(tbl6 + i*16),
+				&tbl6[i],
 				lpm_nh, BURST_SZ);
 			for (j = 0; j < BURST_SZ; j++) {
 				if ((fib_nh[j] != (uint32_t)lpm_nh[j]) &&
@@ -1186,7 +1166,7 @@ run_v6(void)
 	for (k = config.print_fract, i = 0; k > 0; k--) {
 		start = rte_rdtsc_precise();
 		for (j = 0; j < (config.nb_routes - i) / k; j++)
-			rte_fib6_delete(fib, rt[i + j].addr, rt[i + j].depth);
+			rte_fib6_delete(fib, rt[i + j].addr.a, rt[i + j].depth);
 
 		printf("AVG FIB delete %"PRIu64"\n",
 			(rte_rdtsc_precise() - start) / j);
@@ -1197,7 +1177,7 @@ run_v6(void)
 		for (k = config.print_fract, i = 0; k > 0; k--) {
 			start = rte_rdtsc_precise();
 			for (j = 0; j < (config.nb_routes - i) / k; j++)
-				rte_lpm6_delete(lpm, rt[i + j].addr,
+				rte_lpm6_delete(lpm, &rt[i + j].addr,
 					rt[i + j].depth);
 
 			printf("AVG LPM delete %"PRIu64"\n",
diff --git a/app/test-pipeline/pipeline_lpm_ipv6.c b/app/test-pipeline/pipeline_lpm_ipv6.c
index 207ffbeff00f..6558e887c859 100644
--- a/app/test-pipeline/pipeline_lpm_ipv6.c
+++ b/app/test-pipeline/pipeline_lpm_ipv6.c
@@ -127,16 +127,11 @@ app_main_loop_worker_pipeline_lpm_ipv6(void) {
 
 		ip = rte_bswap32(i << (24 -
 			rte_popcount32(app.n_ports - 1)));
-		memcpy(key.ip, &ip, sizeof(uint32_t));
+		memcpy(&key.ip, &ip, sizeof(uint32_t));
 
 		printf("Adding rule to IPv6 LPM table (IPv6 destination = "
-			"%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
-			"%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u => "
-			"port out = %u)\n",
-			key.ip[0], key.ip[1], key.ip[2], key.ip[3],
-			key.ip[4], key.ip[5], key.ip[6], key.ip[7],
-			key.ip[8], key.ip[9], key.ip[10], key.ip[11],
-			key.ip[12], key.ip[13], key.ip[14], key.ip[15],
+			RTE_IPV6_ADDR_FMT "/%u => port out = %u)\n",
+			RTE_IPV6_ADDR_SPLIT(&key.ip),
 			key.depth, i);
 
 		status = rte_pipeline_table_entry_add(p, table_id, &key, &entry,
diff --git a/app/test/test_fib6_perf.c b/app/test/test_fib6_perf.c
index fe713e7094e5..f03cd084aa64 100644
--- a/app/test/test_fib6_perf.c
+++ b/app/test/test_fib6_perf.c
@@ -101,7 +101,7 @@ test_fib6_perf(void)
 
 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
 		next_hop_add = (i & ((1 << 14) - 1)) + 1;
-		if (rte_fib6_add(fib, large_route_table[i].ip,
+		if (rte_fib6_add(fib, large_route_table[i].ip.a,
 				large_route_table[i].depth, next_hop_add) == 0)
 			status++;
 	}
@@ -117,7 +117,7 @@ test_fib6_perf(void)
 	count = 0;
 
 	for (i = 0; i < NUM_IPS_ENTRIES; i++)
-		memcpy(ip_batch[i], large_ips_table[i].ip, 16);
+		memcpy(ip_batch[i], &large_ips_table[i].ip, 16);
 
 	for (i = 0; i < ITERATIONS; i++) {
 
@@ -140,7 +140,7 @@ test_fib6_perf(void)
 
 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
 		/* rte_fib_delete(fib, ip, depth) */
-		status += rte_fib6_delete(fib, large_route_table[i].ip,
+		status += rte_fib6_delete(fib, large_route_table[i].ip.a,
 				large_route_table[i].depth);
 	}
 
diff --git a/app/test/test_lpm6.c b/app/test/test_lpm6.c
index 1d8a0afa1155..b930fa3f0c17 100644
--- a/app/test/test_lpm6.c
+++ b/app/test/test_lpm6.c
@@ -92,30 +92,6 @@ rte_lpm6_test tests6[] = {
 #define MAX_NUM_TBL8S                                          (1 << 21)
 #define PASS 0
 
-static void
-IPv6(uint8_t *ip, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5,
-		uint8_t b6, uint8_t b7, uint8_t b8, uint8_t b9, uint8_t b10,
-		uint8_t b11, uint8_t b12, uint8_t b13, uint8_t b14, uint8_t b15,
-		uint8_t b16)
-{
-	ip[0] = b1;
-	ip[1] = b2;
-	ip[2] = b3;
-	ip[3] = b4;
-	ip[4] = b5;
-	ip[5] = b6;
-	ip[6] = b7;
-	ip[7] = b8;
-	ip[8] = b9;
-	ip[9] = b10;
-	ip[10] = b11;
-	ip[11] = b12;
-	ip[12] = b13;
-	ip[13] = b14;
-	ip[14] = b15;
-	ip[15] = b16;
-}
-
 /*
  * Check that rte_lpm6_create fails gracefully for incorrect user input
  * arguments
@@ -250,7 +226,7 @@ test4(void)
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
 
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth = 24, next_hop = 100;
 	int32_t status = 0;
 
@@ -259,7 +235,7 @@ test4(void)
 	config.flags = 0;
 
 	/* rte_lpm6_add: lpm == NULL */
-	status = rte_lpm6_add(NULL, ip, depth, next_hop);
+	status = rte_lpm6_add(NULL, &ip, depth, next_hop);
 	TEST_LPM_ASSERT(status < 0);
 
 	/*Create valid lpm to use in rest of test. */
@@ -267,11 +243,11 @@ test4(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	/* rte_lpm6_add: depth < 1 */
-	status = rte_lpm6_add(lpm, ip, 0, next_hop);
+	status = rte_lpm6_add(lpm, &ip, 0, next_hop);
 	TEST_LPM_ASSERT(status < 0);
 
 	/* rte_lpm6_add: depth > MAX_DEPTH */
-	status = rte_lpm6_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
+	status = rte_lpm6_add(lpm, &ip, (MAX_DEPTH + 1), next_hop);
 	TEST_LPM_ASSERT(status < 0);
 
 	rte_lpm6_free(lpm);
@@ -288,7 +264,7 @@ test5(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth = 24;
 	int32_t status = 0;
 
@@ -297,7 +273,7 @@ test5(void)
 	config.flags = 0;
 
 	/* rte_lpm_delete: lpm == NULL */
-	status = rte_lpm6_delete(NULL, ip, depth);
+	status = rte_lpm6_delete(NULL, &ip, depth);
 	TEST_LPM_ASSERT(status < 0);
 
 	/*Create valid lpm to use in rest of test. */
@@ -305,11 +281,11 @@ test5(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	/* rte_lpm_delete: depth < 1 */
-	status = rte_lpm6_delete(lpm, ip, 0);
+	status = rte_lpm6_delete(lpm, &ip, 0);
 	TEST_LPM_ASSERT(status < 0);
 
 	/* rte_lpm_delete: depth > MAX_DEPTH */
-	status = rte_lpm6_delete(lpm, ip, (MAX_DEPTH + 1));
+	status = rte_lpm6_delete(lpm, &ip, (MAX_DEPTH + 1));
 	TEST_LPM_ASSERT(status < 0);
 
 	rte_lpm6_free(lpm);
@@ -326,7 +302,7 @@ test6(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint32_t next_hop_return = 0;
 	int32_t status = 0;
 
@@ -335,7 +311,7 @@ test6(void)
 	config.flags = 0;
 
 	/* rte_lpm6_lookup: lpm == NULL */
-	status = rte_lpm6_lookup(NULL, ip, &next_hop_return);
+	status = rte_lpm6_lookup(NULL, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status < 0);
 
 	/*Create valid lpm to use in rest of test. */
@@ -347,7 +323,7 @@ test6(void)
 	TEST_LPM_ASSERT(status < 0);
 
 	/* rte_lpm6_lookup: next_hop = NULL */
-	status = rte_lpm6_lookup(lpm, ip, NULL);
+	status = rte_lpm6_lookup(lpm, &ip, NULL);
 	TEST_LPM_ASSERT(status < 0);
 
 	rte_lpm6_free(lpm);
@@ -364,7 +340,7 @@ test7(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[10][16];
+	struct rte_ipv6_addr ips[10];
 	int32_t next_hop_return[10];
 	int32_t status = 0;
 
@@ -373,7 +349,7 @@ test7(void)
 	config.flags = 0;
 
 	/* rte_lpm6_lookup: lpm == NULL */
-	status = rte_lpm6_lookup_bulk_func(NULL, ip, next_hop_return, 10);
+	status = rte_lpm6_lookup_bulk_func(NULL, ips, next_hop_return, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	/*Create valid lpm to use in rest of test. */
@@ -381,11 +357,11 @@ test7(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	/* rte_lpm6_lookup: ip = NULL */
-	status = rte_lpm6_lookup_bulk_func(lpm, NULL, next_hop_return, 10);
+	status = rte_lpm6_lookup_bulk_func(lpm, NULL, next_hop_return, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	/* rte_lpm6_lookup: next_hop = NULL */
-	status = rte_lpm6_lookup_bulk_func(lpm, ip, NULL, 10);
+	status = rte_lpm6_lookup_bulk_func(lpm, ips, NULL, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	rte_lpm6_free(lpm);
@@ -402,7 +378,7 @@ test8(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[10][16];
+	struct rte_ipv6_addr ips[10];
 	uint8_t depth[10];
 	int32_t status = 0;
 
@@ -411,7 +387,7 @@ test8(void)
 	config.flags = 0;
 
 	/* rte_lpm6_delete: lpm == NULL */
-	status = rte_lpm6_delete_bulk_func(NULL, ip, depth, 10);
+	status = rte_lpm6_delete_bulk_func(NULL, ips, depth, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	/*Create valid lpm to use in rest of test. */
@@ -419,11 +395,11 @@ test8(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	/* rte_lpm6_delete: ip = NULL */
-	status = rte_lpm6_delete_bulk_func(lpm, NULL, depth, 10);
+	status = rte_lpm6_delete_bulk_func(lpm, NULL, depth, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	/* rte_lpm6_delete: next_hop = NULL */
-	status = rte_lpm6_delete_bulk_func(lpm, ip, NULL, 10);
+	status = rte_lpm6_delete_bulk_func(lpm, ips, NULL, RTE_DIM(ips));
 	TEST_LPM_ASSERT(status < 0);
 
 	rte_lpm6_free(lpm);
@@ -441,7 +417,7 @@ test9(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth = 16;
 	uint32_t next_hop_add = 100, next_hop_return = 0;
 	int32_t status = 0;
@@ -454,21 +430,21 @@ test9(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	for (i = 0; i < UINT8_MAX; i++) {
-		ip[2] = i;
-		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		ip.a[2] = i;
+		status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 	}
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	for (i = 0; i < UINT8_MAX; i++) {
-		ip[2] = i;
-		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		ip.a[2] = i;
+		status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
 	}
 
@@ -486,7 +462,7 @@ test10(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth;
 	uint32_t next_hop_add = 100;
 	int32_t status = 0;
@@ -501,20 +477,20 @@ test10(void)
 
 	for (i = 1; i < 128; i++) {
 		depth = (uint8_t)i;
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 	}
 
 	depth = 128;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == -ENOSPC);
 
 	depth = 127;
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 128;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	rte_lpm6_free(lpm);
@@ -531,7 +507,7 @@ test11(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth;
 	uint32_t next_hop_add = 100;
 	int32_t status = 0;
@@ -544,37 +520,37 @@ test11(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	depth = 128;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	ip[0] = 1;
+	ip.a[0] = 1;
 	depth = 25;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 33;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 41;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 49;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == -ENOSPC);
 
 	depth = 41;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	rte_lpm6_free(lpm);
@@ -592,7 +568,7 @@ test12(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth;
 	uint32_t next_hop_add = 100;
 	int32_t status = 0;
@@ -605,16 +581,16 @@ test12(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	depth = 128;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	ip[0] = 1;
+	ip.a[0] = 1;
 	depth = 41;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 49;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == -ENOSPC);
 
 	rte_lpm6_free(lpm);
@@ -631,7 +607,7 @@ test13(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth;
 	uint32_t next_hop_add = 100;
 	int32_t status = 0;
@@ -644,23 +620,23 @@ test13(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	depth = 1;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 2;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 3;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == -ENOSPC);
 
 	depth = 2;
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 3;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	rte_lpm6_free(lpm);
@@ -679,7 +655,7 @@ test14(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth = 25;
 	uint32_t next_hop_add = 100;
 	int32_t status = 0;
@@ -693,24 +669,24 @@ test14(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	for (i = 0; i < 256; i++) {
-		ip[0] = (uint8_t)i;
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		ip.a[0] = (uint8_t)i;
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 	}
 
-	ip[0] = 255;
-	ip[1] = 1;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	ip.a[0] = 255;
+	ip.a[1] = 1;
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == -ENOSPC);
 
-	ip[0] = 255;
-	ip[1] = 0;
-	status = rte_lpm6_delete(lpm, ip, depth);
+	ip.a[0] = 255;
+	ip.a[1] = 0;
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	ip[0] = 255;
-	ip[1] = 1;
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	ip.a[0] = 255;
+	ip.a[1] = 1;
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	rte_lpm6_free(lpm);
@@ -726,7 +702,7 @@ test15(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6_ADDR_UNSPEC;
 	uint8_t depth = 24;
 	uint32_t next_hop_add = 100, next_hop_return = 0;
 	int32_t status = 0;
@@ -738,16 +714,16 @@ test15(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_free(lpm);
@@ -763,7 +739,7 @@ test16(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = {12,12,1,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip = RTE_IPV6(0x0c0c, 0x0100, 0, 0, 0, 0, 0, 0);
 	uint8_t depth = 128;
 	uint32_t next_hop_add = 100, next_hop_return = 0;
 	int32_t status = 0;
@@ -775,16 +751,16 @@ test16(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_free(lpm);
@@ -806,9 +782,9 @@ test17(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip1[] = {127,255,255,255,255,255,255,255,255,
-			255,255,255,255,255,255,255};
-	uint8_t ip2[] = {128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	struct rte_ipv6_addr ip1 =
+		RTE_IPV6(0x7fff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff);
+	struct rte_ipv6_addr ip2 = RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	uint8_t depth;
 	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
@@ -825,14 +801,14 @@ test17(void)
 		/* Let the next_hop_add value = depth. Just for change. */
 		next_hop_add = depth;
 
-		status = rte_lpm6_add(lpm, ip2, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip2, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 
 		/* Check IP in first half of tbl24 which should be empty. */
-		status = rte_lpm6_lookup(lpm, ip1, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip1, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
 
-		status = rte_lpm6_lookup(lpm, ip2, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip2, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) &&
 			(next_hop_return == next_hop_add));
 	}
@@ -841,10 +817,10 @@ test17(void)
 	for (depth = 16; depth >= 1; depth--) {
 		next_hop_add = (depth - 1);
 
-		status = rte_lpm6_delete(lpm, ip2, depth);
+		status = rte_lpm6_delete(lpm, &ip2, depth);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm6_lookup(lpm, ip2, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip2, &next_hop_return);
 
 		if (depth != 1) {
 			TEST_LPM_ASSERT((status == 0) &&
@@ -854,7 +830,7 @@ test17(void)
 			TEST_LPM_ASSERT(status == -ENOENT);
 		}
 
-		status = rte_lpm6_lookup(lpm, ip1, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip1, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
 	}
 
@@ -874,7 +850,7 @@ test18(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[16], ip_1[16], ip_2[16];
+	struct rte_ipv6_addr ip, ip_1, ip_2;
 	uint8_t depth, depth_1, depth_2;
 	uint32_t next_hop_add, next_hop_add_1,
 			next_hop_add_2, next_hop_return;
@@ -885,58 +861,58 @@ test18(void)
 	config.flags = 0;
 
 	/* Add & lookup to hit invalid TBL24 entry */
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 	next_hop_add = 100;
 
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
 
 	/* Add & lookup to hit valid TBL24 entry not extended */
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 23;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	depth = 24;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	depth = 24;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	depth = 23;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
@@ -944,37 +920,37 @@ test18(void)
 	/* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
 	 * entry.
 	 */
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0x0005, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 100;
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
@@ -982,38 +958,38 @@ test18(void)
 	/* Add & lookup to hit valid extended TBL24 entry with valid TBL8
 	 * entry
 	 */
-	IPv6(ip_1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_1 = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth_1 = 25;
 	next_hop_add_1 = 101;
 
-	IPv6(ip_2, 128, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_2 = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0x0005, 0, 0, 0, 0, 0, 0);
 	depth_2 = 32;
 	next_hop_add_2 = 102;
 
 	next_hop_return = 0;
 
-	status = rte_lpm6_add(lpm, ip_1, depth_1, next_hop_add_1);
+	status = rte_lpm6_add(lpm, &ip_1, depth_1, next_hop_add_1);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_1, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_1, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
 
-	status = rte_lpm6_add(lpm, ip_2, depth_2, next_hop_add_2);
+	status = rte_lpm6_add(lpm, &ip_2, depth_2, next_hop_add_2);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_2, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_2, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
 
-	status = rte_lpm6_delete(lpm, ip_2, depth_2);
+	status = rte_lpm6_delete(lpm, &ip_2, depth_2);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_2, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_2, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
 
-	status = rte_lpm6_delete(lpm, ip_1, depth_1);
+	status = rte_lpm6_delete(lpm, &ip_1, depth_1);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_1, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_1, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_free(lpm);
@@ -1037,7 +1013,7 @@ test19(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
@@ -1052,35 +1028,35 @@ test19(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 16;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 25;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
 	rte_lpm6_delete_all(lpm);
@@ -1090,45 +1066,45 @@ test19(void)
 	 * (& delete & lookup)
 	 */
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip, 128, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0x000a, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	next_hop_add = 100;
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
-	IPv6(ip, 128, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0x000a, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
@@ -1138,28 +1114,28 @@ test19(void)
 	 * (& delete & lookup)
 	 */
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
@@ -1169,56 +1145,56 @@ test19(void)
 	 * (& delete & lookup)
 	 */
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
 
 	/* Delete a rule that is not present in the TBL24 & lookup */
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 	next_hop_add = 100;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status < 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_delete_all(lpm);
 
 	/* Delete a rule that is not present in the TBL8 & lookup */
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 100;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status < 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_free(lpm);
@@ -1236,7 +1212,7 @@ test20(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
@@ -1248,45 +1224,45 @@ test20(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0x000a);
 	depth = 128;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	next_hop_add = 100;
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 24;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0x000a);
 	depth = 128;
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm6_free(lpm);
@@ -1304,7 +1280,7 @@ test21(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip_batch[4][16];
+	struct rte_ipv6_addr ip_batch[4];
 	uint8_t depth;
 	uint32_t next_hop_add;
 	int32_t next_hop_return[4];
@@ -1317,28 +1293,28 @@ test21(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	IPv6(ip_batch[0], 128, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[0] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0001, 0, 0, 0, 0, 0);
 	depth = 48;
 	next_hop_add = 100;
 
-	status = rte_lpm6_add(lpm, ip_batch[0], depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[0], depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[1], 128, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[1] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0002, 0, 0, 0, 0, 0);
 	depth = 48;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip_batch[1], depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[1], depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[2], 128, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[2] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0003, 0, 0, 0, 0, 0);
 	depth = 48;
 	next_hop_add = 102;
 
-	status = rte_lpm6_add(lpm, ip_batch[2], depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[2], depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[3], 128, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[3] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0004, 0, 0, 0, 0, 0);
 
 	status = rte_lpm6_lookup_bulk_func(lpm, ip_batch,
 			next_hop_return, 4);
@@ -1363,7 +1339,7 @@ test22(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip_batch[5][16];
+	struct rte_ipv6_addr ip_batch[5];
 	uint8_t depth[5];
 	uint32_t next_hop_add;
 	int32_t next_hop_return[5];
@@ -1378,39 +1354,39 @@ test22(void)
 
 	/* Adds 5 rules and looks them up */
 
-	IPv6(ip_batch[0], 128, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[0] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0001, 0, 0, 0, 0, 0);
 	depth[0] = 48;
 	next_hop_add = 101;
 
-	status = rte_lpm6_add(lpm, ip_batch[0], depth[0], next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[0], depth[0], next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[1], 128, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[1] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0002, 0, 0, 0, 0, 0);
 	depth[1] = 48;
 	next_hop_add = 102;
 
-	status = rte_lpm6_add(lpm, ip_batch[1], depth[1], next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[1], depth[1], next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[2], 128, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[2] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0003, 0, 0, 0, 0, 0);
 	depth[2] = 48;
 	next_hop_add = 103;
 
-	status = rte_lpm6_add(lpm, ip_batch[2], depth[2], next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[2], depth[2], next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[3], 128, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[3] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0004, 0, 0, 0, 0, 0);
 	depth[3] = 48;
 	next_hop_add = 104;
 
-	status = rte_lpm6_add(lpm, ip_batch[3], depth[3], next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[3], depth[3], next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[4], 128, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[4] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0005, 0, 0, 0, 0, 0);
 	depth[4] = 48;
 	next_hop_add = 105;
 
-	status = rte_lpm6_add(lpm, ip_batch[4], depth[4], next_hop_add);
+	status = rte_lpm6_add(lpm, &ip_batch[4], depth[4], next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
 	status = rte_lpm6_lookup_bulk_func(lpm, ip_batch,
@@ -1443,11 +1419,11 @@ test22(void)
 
 	/* Use the delete_bulk function to delete two, one invalid. Lookup again */
 
-	IPv6(ip_batch[4], 128, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[4] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0006, 0, 0, 0, 0, 0);
 	status = rte_lpm6_delete_bulk_func(lpm, &ip_batch[3], depth, 2);
 	TEST_LPM_ASSERT(status == 0);
 
-	IPv6(ip_batch[4], 128, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip_batch[4] = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0x0005, 0, 0, 0, 0, 0);
 	status = rte_lpm6_lookup_bulk_func(lpm, ip_batch,
 			next_hop_return, 5);
 	TEST_LPM_ASSERT(status == 0 && next_hop_return[0] == -1
@@ -1481,7 +1457,7 @@ test23(void)
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
 	uint32_t i;
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
@@ -1493,22 +1469,22 @@ test23(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	ip = (struct rte_ipv6_addr)RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0);
 	depth = 128;
 	next_hop_add = 100;
 
 	for (i = 0; i < 30; i++) {
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) &&
 				(next_hop_return == next_hop_add));
 
-		status = rte_lpm6_delete(lpm, ip, depth);
+		status = rte_lpm6_delete(lpm, &ip, depth);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
 	}
 
@@ -1565,7 +1541,7 @@ test25(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint32_t i;
 	uint8_t depth;
 	uint32_t next_hop_add, next_hop_return, next_hop_expected;
@@ -1579,10 +1555,10 @@ test25(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	for (i = 0; i < 1000; i++) {
-		memcpy(ip, large_route_table[i].ip, 16);
+		ip = large_route_table[i].ip;
 		depth = large_route_table[i].depth;
 		next_hop_add = large_route_table[i].next_hop;
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 	}
 
@@ -1590,10 +1566,10 @@ test25(void)
 	generate_large_ips_table(1);
 
 	for (i = 0; i < 100000; i++) {
-		memcpy(ip, large_ips_table[i].ip, 16);
+		ip = large_ips_table[i].ip;
 		next_hop_expected = large_ips_table[i].next_hop;
 
-		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) &&
 				(next_hop_return == next_hop_expected));
 	}
@@ -1615,9 +1591,9 @@ test26(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip_10_32[] = {10, 10, 10, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-	uint8_t ip_10_24[] = {10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-	uint8_t ip_20_25[] = {10, 10, 20, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	struct rte_ipv6_addr ip_10_32 = RTE_IPV6(0x0a0a, 0x0a02, 0, 0, 0, 0, 0, 0);
+	struct rte_ipv6_addr ip_10_24 = RTE_IPV6(0x0a0a, 0x0a00, 0, 0, 0, 0, 0, 0);
+	struct rte_ipv6_addr ip_20_25 = RTE_IPV6(0x0a0a, 0x1402, 0, 0, 0, 0, 0, 0);
 	uint8_t d_ip_10_32 = 32;
 	uint8_t	d_ip_10_24 = 24;
 	uint8_t	d_ip_20_25 = 25;
@@ -1634,29 +1610,26 @@ test26(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	if ((status = rte_lpm6_add(lpm, ip_10_32, d_ip_10_32,
-			next_hop_ip_10_32)) < 0)
-		return -1;
+	status = rte_lpm6_add(lpm, &ip_10_32, d_ip_10_32, next_hop_ip_10_32);
+	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_10_32, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_10_32, &next_hop_return);
 	uint32_t test_hop_10_32 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
 
-	if ((status = rte_lpm6_add(lpm, ip_10_24, d_ip_10_24,
-			next_hop_ip_10_24)) < 0)
-			return -1;
+	status = rte_lpm6_add(lpm, &ip_10_24, d_ip_10_24, next_hop_ip_10_24);
+	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_10_24, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_10_24, &next_hop_return);
 	uint32_t test_hop_10_24 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
 
-	if ((status = rte_lpm6_add(lpm, ip_20_25, d_ip_20_25,
-			next_hop_ip_20_25)) < 0)
-		return -1;
+	status = rte_lpm6_add(lpm, &ip_20_25, d_ip_20_25, next_hop_ip_20_25);
+	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip_20_25, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_20_25, &next_hop_return);
 	uint32_t test_hop_20_25 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
@@ -1671,11 +1644,11 @@ test26(void)
 		return -1;
 	}
 
-	status = rte_lpm6_lookup(lpm, ip_10_32, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_10_32, &next_hop_return);
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
 
-	status = rte_lpm6_lookup(lpm, ip_10_24, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip_10_24, &next_hop_return);
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
 
@@ -1695,7 +1668,8 @@ test27(void)
 {
 		struct rte_lpm6 *lpm = NULL;
 		struct rte_lpm6_config config;
-		uint8_t ip[] = {128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,0};
+		struct rte_ipv6_addr ip =
+			RTE_IPV6(0x8080, 0x8080, 0x8080, 0x8080, 0x8080, 0x8080, 0x8080, 0);
 		uint8_t depth = 128;
 		uint32_t next_hop_add = 100, next_hop_return;
 		int32_t status = 0;
@@ -1710,19 +1684,19 @@ test27(void)
 
 		depth = 128;
 		next_hop_add = 128;
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 
 		depth = 112;
 		next_hop_add = 112;
-		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 
 		for (i = 0; i < 256; i++) {
-			ip[14] = (uint8_t)i;
+			ip.a[14] = i;
 			for (j = 0; j < 256; j++) {
-				ip[15] = (uint8_t)j;
-				status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+				ip.a[15] = j;
+				status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 				if (i == 0 && j == 0)
 					TEST_LPM_ASSERT(status == 0 && next_hop_return == 128);
 				else
@@ -1746,7 +1720,7 @@ test28(void)
 {
 	struct rte_lpm6 *lpm = NULL;
 	struct rte_lpm6_config config;
-	uint8_t ip[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+	struct rte_ipv6_addr ip = RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0);
 	uint8_t depth = 16;
 	uint32_t next_hop_add = 0x001FFFFF, next_hop_return = 0;
 	int32_t status = 0;
@@ -1758,13 +1732,13 @@ test28(void)
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);
 
-	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	status = rte_lpm6_add(lpm, &ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm6_lookup(lpm, &ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
-	status = rte_lpm6_delete(lpm, ip, depth);
+	status = rte_lpm6_delete(lpm, &ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 	rte_lpm6_free(lpm);
 
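[Reviewer note: the snippet below is an illustrative sketch of the calling
convention the hunks above convert the tests to; it is not part of the diff.
The legacy IPv6() test helper filled a uint8_t ip[16] byte by byte, whereas
the converted code builds a struct rte_ipv6_addr from eight 16-bit groups
with the RTE_IPV6() initializer from rte_ip6.h and passes its address to the
rte_lpm6_*() API.]

    #include <rte_ip6.h>
    #include <rte_lpm6.h>

    /* Illustrative only: mirrors the converted test calls above,
     * assuming an already-created LPM6 table. */
    static int
    lookup_example(struct rte_lpm6 *lpm)
    {
    	/* Typed address built from eight 16-bit groups
    	 * (equivalent to the old byte array {10, 10, 10, 2, 0, ...}). */
    	struct rte_ipv6_addr ip = RTE_IPV6(0x0a0a, 0x0a02, 0, 0, 0, 0, 0, 0);
    	uint32_t next_hop;

    	/* Individual bytes stay reachable through the .a[] member,
    	 * as the test27 loop above relies on. */
    	ip.a[15] = 7;

    	/* Addresses are now passed by pointer rather than by array decay. */
    	return rte_lpm6_lookup(lpm, &ip, &next_hop);
    }
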
diff --git a/app/test/test_lpm6_data.h b/app/test/test_lpm6_data.h
index 8ddb59563ee4..2a20b9ec36f1 100644
--- a/app/test/test_lpm6_data.h
+++ b/app/test/test_lpm6_data.h
@@ -7,16 +7,17 @@
 #include <stdint.h>
 #include <stdlib.h>
 
+#include <rte_ip6.h>
 #include <rte_random.h>
 
 struct rules_tbl_entry {
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint8_t next_hop;
 };
 
 struct ips_tbl_entry {
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t next_hop;
 };
 
@@ -29,1006 +30,1006 @@ struct ips_tbl_entry {
  */
 
 static struct rules_tbl_entry large_route_table[] = {
-	{{66, 70, 154, 143, 197, 233, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 146},
-	{{107, 79, 18, 235, 142, 84, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 141},
-	{{247, 132, 113, 1, 215, 247, 183, 239, 128, 0, 0, 0, 0, 0, 0, 0}, 67, 23},
-	{{48, 19, 41, 12, 76, 101, 114, 160, 45, 103, 134, 146, 128, 0, 0, 0}, 97, 252},
-	{{5, 70, 208, 170, 19, 0, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 6},
-	{{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 137},
-	{{12, 188, 26, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 9},
-	{{1, 235, 101, 202, 26, 92, 23, 22, 179, 223, 128, 0, 0, 0, 0, 0}, 82, 9},
-	{{215, 19, 224, 102, 45, 133, 102, 249, 56, 20, 214, 219, 93, 125, 52, 0}, 120, 163},
-	{{178, 183, 109, 64, 136, 84, 11, 53, 217, 102, 0, 0, 0, 0, 0, 0}, 79, 197},
-	{{212, 39, 158, 71, 253, 98, 248, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 249},
-	{{92, 58, 159, 130, 105, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 88},
-	{{118, 140, 65, 198, 212, 93, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 104},
-	{{86, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 36},
-	{{79, 135, 242, 193, 197, 11, 200, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 239},
-	{{163, 228, 239, 80, 41, 66, 176, 176, 0, 0, 0, 0, 0, 0, 0, 0}, 67, 201},
-	{{31, 9, 231, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 94},
-	{{108, 144, 205, 39, 215, 26, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 241},
-	{{247, 217, 172, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 239},
-	{{24, 186, 73, 182, 240, 251, 125, 165, 0, 0, 0, 0, 0, 0, 0, 0}, 66, 151},
-	{{245, 112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 12, 137},
-	{{44, 94, 138, 224, 168, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 231},
-	{{184, 221, 109, 135, 225, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 11},
-	{{51, 179, 136, 184, 30, 118, 24, 16, 26, 161, 206, 101, 0, 0, 0, 0}, 96, 20},
-	{{48, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 15, 68},
-	{{143, 235, 237, 220, 89, 119, 187, 143, 209, 94, 46, 58, 120, 0, 0, 0}, 101, 64},
-	{{121, 190, 90, 177, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 152},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 6, 217},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 101},
-	{{111, 214, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 15, 58},
-	{{162, 23, 52, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 254},
-	{{76, 103, 44, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 148},
-	{{80, 85, 219, 214, 12, 4, 65, 129, 162, 148, 208, 78, 39, 69, 94, 184}, 126, 126},
-	{{80, 54, 251, 28, 152, 23, 244, 192, 151, 83, 6, 144, 223, 213, 224, 128}, 123, 76},
-	{{39, 232, 237, 103, 191, 188, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 240},
-	{{20, 231, 89, 210, 167, 173, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 33},
-	{{125, 67, 198, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 25, 47},
-	{{26, 239, 153, 5, 213, 121, 31, 114, 161, 46, 84, 15, 148, 160, 0, 0}, 109, 41},
-	{{102, 212, 159, 118, 223, 115, 134, 172, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 72},
-	{{85, 181, 241, 127, 3, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 43},
-	{{61, 199, 131, 226, 3, 230, 94, 119, 240, 0, 0, 0, 0, 0, 0, 0}, 68, 26},
-	{{0, 143, 160, 184, 162, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 139},
-	{{170, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 219},
-	{{61, 122, 24, 251, 124, 122, 202, 192, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 105},
-	{{33, 219, 226, 3, 180, 190, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 210},
-	{{51, 251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 151},
-	{{106, 185, 11, 122, 197, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 28},
-	{{192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 64},
-	{{239, 195, 77, 239, 131, 156, 2, 246, 191, 178, 204, 160, 21, 213, 30, 128}, 121, 9},
-	{{141, 207, 181, 99, 55, 245, 151, 228, 65, 50, 85, 16, 0, 0, 0, 0}, 92, 250},
-	{{110, 159, 230, 251, 224, 210, 58, 49, 0, 0, 0, 0, 0, 0, 0, 0}, 66, 200},
-	{{134, 26, 104, 32, 129, 41, 201, 50, 164, 69, 178, 156, 156, 133, 8, 218}, 127, 132},
-	{{253, 207, 116, 105, 210, 166, 186, 99, 182, 0, 0, 0, 0, 0, 0, 0}, 71, 182},
-	{{211, 73, 38, 80, 183, 168, 52, 138, 25, 214, 112, 8, 252, 0, 0, 0}, 102, 7},
-	{{200, 244, 108, 238, 164, 141, 215, 39, 233, 249, 120, 80, 112, 0, 0, 0}, 100, 146},
-	{{107, 44, 250, 202, 64, 37, 107, 105, 140, 0, 0, 0, 0, 0, 0, 0}, 70, 98},
-	{{93, 86, 56, 27, 159, 195, 126, 39, 240, 201, 48, 0, 0, 0, 0, 0}, 86, 179},
-	{{32, 202, 214, 242, 39, 141, 61, 146, 138, 96, 0, 0, 0, 0, 0, 0}, 77, 245},
-	{{167, 77, 249, 28, 210, 196, 227, 241, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 2},
-	{{241, 59, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 5},
-	{{143, 68, 146, 210, 173, 155, 251, 173, 0, 0, 0, 0, 0, 0, 0, 0}, 66, 169},
-	{{167, 180, 226, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 52},
-	{{241, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 177},
-	{{238, 9, 168, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 74},
-	{{203, 148, 16, 96, 125, 18, 86, 1, 91, 244, 251, 20, 31, 14, 75, 128}, 122, 212},
-	{{111, 227, 137, 94, 65, 21, 77, 137, 119, 130, 159, 19, 159, 45, 18, 192}, 122, 238},
-	{{59, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 18},
-	{{110, 192, 255, 120, 84, 215, 3, 130, 38, 224, 0, 0, 0, 0, 0, 0}, 75, 155},
-	{{152, 79, 219, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 97},
-	{{118, 186, 157, 248, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 8},
-	{{70, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 123},
-	{{253, 119, 114, 227, 18, 243, 81, 61, 238, 107, 190, 144, 0, 0, 0, 0}, 92, 11},
-	{{166, 112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 211},
-	{{43, 95, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 20, 116},
-	{{94, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 57},
-	{{182, 251, 195, 132, 66, 7, 208, 146, 223, 231, 211, 181, 25, 176, 0, 0}, 108, 178},
-	{{152, 166, 111, 233, 194, 17, 230, 41, 221, 253, 69, 123, 108, 0, 0, 0}, 102, 93},
-	{{106, 141, 235, 190, 82, 241, 152, 186, 195, 81, 86, 144, 0, 0, 0, 0}, 92, 3},
-	{{32, 81, 210, 153, 151, 29, 11, 62, 127, 177, 194, 254, 103, 83, 58, 128}, 121, 162},
-	{{79, 112, 224, 26, 174, 39, 98, 181, 115, 57, 209, 189, 136, 48, 0, 0}, 109, 125},
-	{{106, 197, 83, 151, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 33},
-	{{190, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 254},
-	{{156, 73, 249, 148, 55, 192, 20, 42, 142, 128, 0, 0, 0, 0, 0, 0}, 74, 66},
-	{{64, 107, 36, 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 4},
-	{{115, 148, 71, 250, 158, 174, 168, 249, 106, 110, 196, 0, 0, 0, 0, 0}, 86, 122},
-	{{18, 139, 152, 44, 38, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 59},
-	{{55, 229, 117, 106, 146, 95, 74, 220, 122, 0, 84, 202, 183, 138, 120, 0}, 117, 99},
-	{{153, 211, 3, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 41},
-	{{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 112},
-	{{49, 192, 102, 142, 216, 3, 114, 64, 165, 128, 168, 0, 0, 0, 0, 0}, 85, 255},
-	{{201, 143, 240, 240, 209, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 106},
-	{{158, 19, 164, 196, 87, 162, 33, 120, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 170},
-	{{5, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 86},
-	{{34, 170, 246, 62, 198, 85, 193, 227, 252, 68, 0, 0, 0, 0, 0, 0}, 79, 155},
-	{{21, 52, 9, 86, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 35, 65},
-	{{203, 81, 49, 171, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 39},
-	{{211, 218, 87, 244, 93, 181, 118, 41, 156, 143, 254, 0, 0, 0, 0, 0}, 90, 162},
-	{{77, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 69},
-	{{158, 219, 219, 39, 4, 219, 100, 63, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 163},
-	{{61, 50, 232, 1, 185, 252, 243, 54, 189, 240, 170, 192, 0, 0, 0, 0}, 90, 116},
-	{{241, 143, 33, 19, 247, 55, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 19},
-	{{61, 28, 61, 252, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 48},
-	{{102, 112, 194, 108, 90, 253, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 230},
-	{{74, 88, 58, 66, 172, 41, 144, 204, 195, 240, 0, 0, 0, 0, 0, 0}, 78, 155},
-	{{44, 148, 187, 58, 190, 59, 190, 187, 124, 138, 222, 131, 0, 0, 0, 0}, 96, 158},
-	{{67, 7, 216, 139, 93, 224, 20, 135, 186, 86, 209, 111, 60, 80, 0, 0}, 113, 252},
-	{{209, 26, 12, 174, 5, 101, 164, 181, 237, 63, 192, 57, 54, 120, 0, 0}, 110, 176},
-	{{4, 66, 232, 52, 239, 56, 48, 58, 192, 0, 0, 0, 0, 0, 0, 0}, 66, 211},
-	{{158, 165, 2, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 15},
-	{{85, 204, 245, 198, 68, 44, 39, 71, 32, 0, 0, 0, 0, 0, 0, 0}, 68, 95},
-	{{181, 134, 25, 87, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 169},
-	{{26, 230, 61, 36, 79, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 249},
-	{{5, 170, 198, 139, 65, 186, 188, 45, 42, 253, 165, 89, 206, 0, 0, 0}, 105, 61},
-	{{211, 245, 62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 23, 63},
-	{{117, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 43},
-	{{103, 17, 123, 102, 70, 206, 90, 92, 124, 198, 0, 0, 0, 0, 0, 0}, 81, 228},
-	{{192, 237, 88, 244, 53, 30, 61, 160, 143, 64, 0, 0, 0, 0, 0, 0}, 78, 165},
-	{{199, 82, 217, 183, 2, 179, 195, 6, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 3},
-	{{157, 230, 79, 162, 57, 125, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 211},
-	{{27, 67, 64, 235, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 210},
-	{{72, 158, 163, 106, 193, 137, 190, 7, 250, 165, 249, 73, 64, 0, 0, 0}, 99, 61},
-	{{34, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 120},
-	{{215, 141, 95, 192, 189, 62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 94},
-	{{31, 181, 56, 141, 120, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 153},
-	{{153, 73, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 221},
-	{{162, 107, 41, 189, 165, 155, 22, 139, 165, 72, 96, 0, 0, 0, 0, 0}, 87, 163},
-	{{218, 17, 204, 165, 217, 251, 107, 45, 29, 15, 192, 167, 75, 0, 0, 0}, 106, 188},
-	{{200, 124, 238, 213, 35, 228, 94, 141, 86, 187, 101, 60, 115, 52, 131, 16}, 124, 15},
-	{{74, 237, 160, 56, 141, 217, 191, 16, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 28},
-	{{163, 47, 242, 103, 173, 217, 88, 154, 38, 200, 32, 0, 0, 0, 0, 0}, 84, 240},
-	{{20, 227, 128, 28, 144, 147, 22, 13, 94, 129, 107, 88, 0, 0, 0, 0}, 93, 59},
-	{{95, 144, 229, 107, 218, 125, 204, 233, 161, 42, 180, 64, 0, 0, 0, 0}, 90, 195},
-	{{155, 220, 83, 208, 108, 16, 134, 156, 128, 0, 0, 0, 0, 0, 0, 0}, 66, 10},
-	{{179, 138, 55, 80, 190, 153, 12, 237, 22, 120, 69, 0, 0, 0, 0, 0}, 88, 206},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 137},
-	{{3, 119, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 225},
-	{{13, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 223},
-	{{117, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 15, 29},
-	{{164, 19, 195, 47, 136, 190, 156, 255, 30, 74, 143, 134, 162, 0, 0, 0}, 103, 166},
-	{{40, 235, 94, 135, 135, 230, 71, 33, 64, 233, 0, 0, 0, 0, 0, 0}, 80, 178},
-	{{222, 151, 166, 97, 129, 250, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 38},
-	{{174, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 141},
-	{{6, 189, 100, 150, 250, 13, 46, 98, 228, 139, 50, 52, 52, 196, 128, 0}, 116, 230},
-	{{75, 252, 89, 205, 37, 52, 106, 79, 188, 120, 54, 119, 160, 0, 0, 0}, 99, 124},
-	{{38, 18, 146, 6, 63, 64, 231, 10, 152, 199, 5, 143, 147, 4, 252, 0}, 118, 54},
-	{{111, 119, 169, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 162},
-	{{105, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 32},
-	{{143, 57, 57, 101, 98, 182, 74, 227, 205, 143, 253, 237, 8, 0, 0, 0}, 102, 237},
-	{{30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 7, 215},
-	{{14, 232, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 138},
-	{{14, 53, 67, 216, 229, 155, 149, 139, 31, 253, 184, 126, 133, 108, 40, 0}, 118, 73},
-	{{22, 58, 40, 143, 188, 132, 239, 14, 181, 252, 81, 192, 0, 0, 0, 0}, 90, 43},
-	{{11, 222, 185, 243, 248, 150, 79, 230, 214, 213, 3, 23, 193, 196, 0, 0}, 112, 88},
-	{{14, 226, 198, 117, 84, 93, 22, 96, 77, 241, 173, 68, 68, 204, 72, 0}, 119, 91},
-	{{15, 103, 247, 219, 150, 142, 92, 50, 144, 0, 0, 0, 0, 0, 0, 0}, 69, 140},
-	{{0, 213, 77, 244, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 37, 65},
-	{{178, 174, 174, 239, 72, 181, 36, 217, 40, 169, 12, 104, 149, 157, 125, 128}, 122, 201},
-	{{118, 53, 55, 17, 97, 227, 243, 176, 2, 0, 0, 0, 0, 0, 0, 0}, 72, 69},
-	{{21, 253, 4, 47, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 35, 170},
-	{{5, 249, 186, 133, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 192},
-	{{47, 79, 35, 66, 11, 178, 161, 28, 87, 180, 45, 128, 0, 0, 0, 0}, 89, 21},
-	{{242, 227, 20, 73, 150, 196, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 35},
-	{{121, 169, 102, 118, 157, 192, 154, 186, 126, 0, 0, 0, 0, 0, 0, 0}, 71, 235},
-	{{9, 138, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 240},
-	{{45, 173, 14, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 136},
-	{{127, 47, 51, 201, 236, 45, 142, 80, 0, 0, 0, 0, 0, 0, 0, 0}, 60, 186},
-	{{247, 233, 34, 38, 181, 207, 127, 20, 224, 118, 59, 148, 0, 0, 0, 0}, 95, 174},
-	{{126, 187, 198, 104, 245, 223, 219, 18, 31, 124, 0, 0, 0, 0, 0, 0}, 79, 153},
-	{{3, 163, 107, 228, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 35, 118},
-	{{167, 109, 2, 95, 11, 62, 45, 128, 0, 0, 0, 0, 0, 0, 0, 0}, 60, 113},
-	{{76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 6, 58},
-	{{58, 190, 204, 151, 222, 147, 47, 78, 38, 203, 9, 17, 64, 0, 0, 0}, 101, 206},
-	{{254, 220, 254, 220, 204, 79, 35, 127, 242, 63, 106, 232, 127, 180, 0, 0}, 111, 42},
-	{{77, 156, 8, 209, 181, 37, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 230},
-	{{65, 89, 137, 76, 208, 199, 166, 90, 128, 0, 0, 0, 0, 0, 0, 0}, 67, 6},
-	{{47, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 254},
-	{{172, 154, 12, 108, 77, 37, 106, 8, 234, 7, 248, 212, 112, 160, 0, 0}, 108, 214},
-	{{254, 117, 239, 244, 154, 89, 166, 241, 12, 108, 127, 153, 206, 160, 0, 0}, 107, 43},
-	{{113, 160, 206, 52, 143, 12, 9, 148, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 178},
-	{{178, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 179},
-	{{229, 177, 28, 106, 59, 75, 182, 241, 36, 79, 224, 0, 0, 0, 0, 0}, 87, 236},
-	{{156, 72, 93, 193, 50, 235, 75, 228, 88, 115, 89, 119, 128, 0, 0, 0}, 98, 184},
-	{{28, 232, 28, 249, 83, 105, 211, 7, 136, 147, 231, 64, 0, 0, 0, 0}, 91, 95},
-	{{217, 33, 23, 107, 74, 42, 135, 197, 144, 34, 40, 243, 13, 126, 36, 136}, 127, 152},
-	{{64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 113},
-	{{85, 172, 121, 126, 213, 57, 225, 54, 197, 73, 85, 251, 9, 64, 0, 0}, 108, 137},
-	{{104, 46, 25, 71, 86, 220, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 224},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 6, 61},
-	{{241, 113, 254, 106, 53, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 205},
-	{{29, 36, 12, 244, 197, 127, 240, 8, 167, 134, 154, 248, 199, 123, 143, 240}, 124, 170},
-	{{58, 29, 129, 94, 43, 139, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 117},
-	{{213, 124, 147, 196, 7, 82, 67, 70, 228, 0, 0, 0, 0, 0, 0, 0}, 70, 225},
-	{{164, 168, 161, 140, 87, 85, 250, 41, 34, 0, 0, 0, 0, 0, 0, 0}, 72, 34},
-	{{186, 142, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 5},
-	{{237, 249, 9, 70, 247, 97, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 92},
-	{{155, 92, 145, 218, 125, 226, 226, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 230},
-	{{35, 169, 62, 156, 86, 4, 125, 219, 119, 113, 191, 75, 198, 113, 0, 0}, 112, 61},
-	{{207, 63, 96, 186, 26, 68, 115, 161, 163, 59, 190, 166, 18, 78, 232, 0}, 117, 221},
-	{{86, 40, 200, 199, 247, 86, 159, 179, 191, 184, 117, 173, 211, 158, 0, 128}, 121, 105},
-	{{104, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 181},
-	{{205, 35, 123, 178, 36, 64, 62, 153, 195, 250, 0, 0, 0, 0, 0, 0}, 79, 110},
-	{{117, 40, 57, 157, 138, 160, 223, 59, 155, 145, 64, 0, 0, 0, 0, 0}, 86, 103},
-	{{74, 166, 140, 146, 74, 72, 229, 99, 167, 124, 107, 117, 217, 14, 246, 64}, 123, 218},
-	{{12, 222, 244, 183, 83, 146, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 146},
-	{{11, 98, 146, 110, 95, 96, 80, 142, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 90},
-	{{235, 5, 187, 199, 30, 170, 82, 187, 228, 159, 22, 25, 204, 112, 0, 0}, 108, 197},
-	{{35, 96, 146, 145, 155, 116, 252, 181, 29, 205, 230, 246, 30, 0, 0, 0}, 103, 158},
-	{{174, 38, 56, 244, 227, 102, 252, 237, 128, 86, 0, 0, 0, 0, 0, 0}, 81, 118},
-	{{65, 134, 37, 58, 90, 125, 60, 84, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 95},
-	{{253, 117, 135, 98, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 152},
-	{{111, 115, 188, 184, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 239},
-	{{202, 24, 89, 9, 149, 45, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 48},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 228},
-	{{244, 98, 52, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 247},
-	{{151, 167, 43, 178, 116, 194, 173, 126, 236, 98, 40, 0, 0, 0, 0, 0}, 85, 12},
-	{{60, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 129},
-	{{208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 50},
-	{{126, 11, 216, 242, 7, 45, 121, 208, 110, 135, 210, 75, 59, 182, 228, 42}, 128, 250},
-	{{217, 26, 184, 146, 3, 18, 240, 15, 135, 8, 0, 0, 0, 0, 0, 0}, 77, 249},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 230},
-	{{145, 28, 29, 184, 2, 85, 234, 135, 98, 111, 136, 32, 0, 0, 0, 0}, 92, 228},
-	{{108, 104, 255, 254, 34, 95, 72, 157, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 181},
-	{{153, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 206},
-	{{22, 250, 130, 201, 132, 248, 189, 108, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 122},
-	{{158, 165, 234, 18, 44, 61, 82, 61, 235, 0, 0, 0, 0, 0, 0, 0}, 72, 81},
-	{{236, 57, 124, 110, 124, 218, 82, 70, 142, 78, 18, 128, 0, 0, 0, 0}, 95, 175},
-	{{94, 209, 200, 201, 149, 162, 248, 134, 239, 226, 1, 237, 16, 134, 56, 0}, 118, 170},
-	{{187, 42, 31, 144, 236, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 174},
-	{{90, 214, 185, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 104},
-	{{194, 220, 211, 212, 211, 32, 196, 98, 71, 62, 153, 103, 80, 35, 128, 0}, 114, 113},
-	{{24, 255, 158, 64, 180, 148, 10, 81, 243, 247, 0, 0, 0, 0, 0, 0}, 80, 89},
-	{{231, 155, 100, 242, 112, 160, 160, 95, 98, 253, 219, 21, 239, 90, 0, 0}, 113, 151},
-	{{225, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 108},
-	{{136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 7, 224},
-	{{250, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 95},
-	{{72, 168, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 173},
-	{{185, 51, 51, 167, 18, 44, 36, 59, 35, 135, 20, 104, 0, 0, 0, 0}, 93, 176},
-	{{57, 146, 252, 60, 197, 68, 39, 162, 80, 198, 137, 50, 97, 92, 124, 0}, 119, 84},
-	{{254, 46, 242, 105, 86, 94, 96, 14, 130, 176, 0, 0, 0, 0, 0, 0}, 78, 104},
-	{{247, 202, 176, 76, 69, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 236},
-	{{50, 233, 203, 77, 42, 21, 115, 163, 166, 138, 192, 52, 178, 37, 112, 0}, 116, 153},
-	{{62, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 190},
-	{{53, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 202},
-	{{198, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 54},
-	{{189, 234, 106, 247, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 156},
-	{{110, 24, 228, 65, 216, 147, 9, 48, 60, 179, 172, 91, 115, 185, 227, 96}, 126, 245},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 6, 218},
-	{{74, 177, 89, 218, 248, 18, 176, 39, 118, 173, 201, 152, 0, 0, 0, 0}, 93, 72},
-	{{31, 13, 153, 92, 27, 122, 150, 232, 88, 95, 202, 171, 208, 158, 0, 0}, 112, 183},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 183},
-	{{63, 37, 46, 158, 139, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 241},
-	{{53, 209, 59, 13, 202, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 106},
-	{{184, 44, 149, 221, 180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 180},
-	{{222, 134, 37, 62, 223, 193, 39, 246, 15, 151, 200, 146, 0, 0, 0, 0}, 96, 142},
-	{{199, 176, 189, 37, 233, 177, 252, 216, 94, 175, 253, 119, 96, 0, 0, 0}, 100, 6},
-	{{44, 195, 201, 106, 209, 120, 122, 38, 43, 30, 142, 22, 196, 175, 100, 0}, 118, 33},
-	{{33, 166, 10, 174, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 224},
-	{{54, 1, 189, 195, 133, 49, 36, 80, 138, 200, 0, 0, 0, 0, 0, 0}, 78, 14},
-	{{241, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 149},
-	{{221, 131, 4, 247, 112, 89, 187, 119, 219, 80, 122, 156, 216, 160, 0, 0}, 108, 131},
-	{{102, 20, 46, 129, 202, 247, 129, 1, 237, 71, 103, 58, 217, 44, 4, 0}, 121, 133},
-	{{107, 156, 151, 44, 215, 98, 171, 126, 85, 32, 42, 128, 0, 0, 0, 0}, 89, 33},
-	{{54, 25, 70, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 204},
-	{{149, 211, 242, 14, 112, 219, 176, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 43},
-	{{95, 26, 143, 193, 8, 76, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 168},
-	{{63, 102, 244, 176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 180},
-	{{64, 85, 124, 226, 59, 239, 64, 130, 68, 122, 93, 74, 32, 37, 0, 0}, 112, 208},
-	{{113, 90, 253, 149, 3, 218, 34, 215, 3, 143, 192, 64, 0, 0, 0, 0}, 90, 25},
-	{{75, 231, 33, 5, 11, 94, 117, 104, 150, 60, 72, 161, 96, 38, 0, 0}, 111, 50},
-	{{52, 13, 248, 1, 251, 14, 50, 29, 212, 123, 130, 177, 101, 96, 0, 0}, 109, 110},
-	{{248, 221, 150, 132, 252, 82, 96, 2, 80, 232, 97, 239, 253, 64, 0, 0}, 109, 21},
-	{{136, 77, 164, 161, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 147},
-	{{1, 33, 66, 254, 144, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 43, 56},
-	{{181, 25, 186, 225, 109, 190, 76, 158, 118, 122, 20, 64, 125, 55, 8, 0}, 117, 144},
-	{{191, 187, 160, 140, 17, 6, 80, 120, 236, 212, 104, 144, 128, 0, 0, 0}, 100, 198},
-	{{201, 61, 150, 254, 70, 77, 214, 211, 171, 163, 245, 64, 0, 0, 0, 0}, 90, 235},
-	{{143, 226, 190, 50, 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 105},
-	{{65, 168, 226, 36, 201, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 138},
-	{{136, 40, 65, 90, 47, 16, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 122},
-	{{94, 189, 224, 200, 170, 11, 79, 172, 0, 0, 0, 0, 0, 0, 0, 0}, 65, 193},
-	{{236, 41, 169, 234, 14, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 43, 231},
-	{{1, 40, 140, 95, 81, 173, 250, 248, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 250},
-	{{83, 176, 146, 112, 89, 156, 57, 220, 125, 48, 44, 0, 0, 0, 0, 0}, 86, 24},
-	{{76, 125, 228, 249, 243, 160, 106, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 191},
-	{{10, 203, 204, 49, 212, 115, 125, 4, 239, 122, 81, 34, 1, 198, 216, 0}, 117, 111},
-	{{74, 214, 23, 44, 211, 40, 161, 61, 237, 190, 155, 59, 173, 42, 0, 0}, 111, 205},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 133},
-	{{127, 0, 130, 61, 209, 5, 232, 35, 35, 42, 114, 52, 169, 234, 191, 0}, 122, 122},
-	{{201, 107, 210, 13, 187, 62, 145, 28, 31, 189, 56, 0, 0, 0, 0, 0}, 87, 227},
-	{{147, 171, 63, 145, 47, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 53},
-	{{93, 232, 10, 97, 21, 243, 213, 135, 200, 0, 0, 0, 0, 0, 0, 0}, 72, 224},
-	{{144, 121, 41, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 199},
-	{{116, 105, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 79},
-	{{142, 149, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 19},
-	{{97, 0, 228, 158, 50, 233, 251, 249, 0, 66, 197, 226, 0, 0, 0, 0}, 96, 211},
-	{{114, 228, 199, 155, 175, 104, 26, 213, 66, 249, 120, 218, 164, 252, 212, 0}, 120, 6},
-	{{224, 166, 76, 200, 121, 60, 110, 65, 60, 95, 137, 190, 92, 218, 218, 0}, 121, 143},
-	{{139, 219, 92, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 135},
-	{{203, 237, 64, 189, 28, 13, 75, 197, 219, 243, 172, 3, 142, 32, 0, 0}, 109, 21},
-	{{237, 186, 88, 254, 124, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 220},
-	{{182, 230, 93, 162, 129, 25, 56, 196, 112, 0, 0, 0, 0, 0, 0, 0}, 68, 151},
-	{{245, 45, 69, 226, 90, 212, 254, 16, 0, 0, 0, 0, 0, 0, 0, 0}, 60, 111},
-	{{107, 229, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 20, 63},
-	{{119, 208, 177, 235, 222, 252, 219, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 112},
-	{{178, 151, 220, 162, 120, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 48},
-	{{109, 26, 95, 170, 166, 151, 137, 83, 226, 82, 5, 114, 253, 210, 18, 12}, 126, 100},
-	{{126, 27, 252, 19, 219, 129, 121, 48, 0, 0, 0, 0, 0, 0, 0, 0}, 60, 156},
-	{{211, 195, 152, 145, 154, 93, 228, 215, 135, 101, 28, 82, 0, 0, 0, 0}, 95, 120},
-	{{252, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 5},
-	{{192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 103},
-	{{64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 84},
-	{{225, 179, 43, 43, 222, 145, 205, 238, 164, 158, 147, 229, 56, 0, 0, 0}, 101, 24},
-	{{208, 127, 151, 24, 64, 113, 47, 85, 209, 79, 144, 0, 0, 0, 0, 0}, 86, 81},
-	{{178, 144, 203, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 96},
-	{{56, 227, 139, 4, 86, 87, 180, 1, 215, 167, 237, 156, 111, 64, 47, 0}, 121, 6},
-	{{80, 76, 204, 119, 172, 169, 254, 81, 104, 166, 219, 44, 173, 161, 212, 0}, 119, 40},
-	{{129, 141, 139, 34, 241, 101, 223, 144, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 143},
-	{{85, 102, 137, 98, 65, 103, 54, 142, 144, 0, 0, 0, 0, 0, 0, 0}, 68, 69},
-	{{56, 31, 159, 13, 201, 139, 161, 31, 89, 137, 4, 0, 0, 0, 0, 0}, 92, 48},
-	{{229, 221, 54, 216, 223, 27, 196, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 115},
-	{{5, 144, 176, 43, 180, 187, 20, 49, 59, 73, 108, 34, 83, 32, 192, 0}, 115, 130},
-	{{24, 217, 205, 193, 74, 123, 160, 106, 103, 74, 200, 0, 0, 0, 0, 0}, 86, 57},
-	{{247, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 97},
-	{{12, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 146},
-	{{160, 28, 201, 119, 148, 93, 251, 118, 28, 179, 123, 52, 71, 232, 48, 0}, 117, 194},
-	{{152, 126, 17, 54, 101, 56, 130, 1, 205, 41, 207, 90, 151, 123, 128, 0}, 114, 129},
-	{{77, 165, 29, 239, 95, 242, 34, 1, 11, 204, 135, 239, 128, 0, 0, 0}, 97, 159},
-	{{183, 108, 146, 118, 74, 190, 7, 141, 9, 92, 2, 2, 8, 218, 120, 0}, 117, 242},
-	{{37, 152, 29, 239, 242, 53, 56, 143, 219, 22, 14, 158, 49, 0, 0, 0}, 104, 162},
-	{{198, 53, 241, 102, 240, 244, 97, 203, 62, 128, 213, 214, 220, 0, 0, 0}, 102, 140},
-	{{144, 89, 48, 42, 249, 231, 189, 178, 232, 199, 30, 58, 63, 57, 0, 0}, 113, 77},
-	{{68, 212, 177, 123, 44, 224, 19, 172, 89, 87, 192, 0, 0, 0, 0, 0}, 82, 121},
-	{{252, 29, 179, 224, 4, 121, 205, 67, 152, 0, 0, 0, 0, 0, 0, 0}, 69, 102},
-	{{28, 110, 164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 23, 28},
-	{{24, 88, 231, 1, 4, 71, 71, 241, 252, 14, 197, 0, 0, 0, 0, 0}, 89, 154},
-	{{63, 131, 43, 76, 58, 140, 163, 74, 158, 80, 0, 0, 0, 0, 0, 0}, 76, 39},
-	{{56, 28, 147, 149, 98, 93, 216, 216, 203, 156, 0, 0, 0, 0, 0, 0}, 78, 163},
-	{{134, 169, 6, 103, 161, 244, 134, 117, 16, 0, 0, 0, 0, 0, 0, 0}, 68, 42},
-	{{143, 247, 125, 190, 106, 50, 204, 98, 250, 151, 161, 96, 0, 0, 0, 0}, 92, 207},
-	{{235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 25},
-	{{46, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 150},
-	{{171, 35, 128, 117, 74, 29, 199, 67, 109, 176, 0, 0, 0, 0, 0, 0}, 76, 103},
-	{{220, 233, 236, 112, 135, 136, 215, 43, 42, 0, 0, 0, 0, 0, 0, 0}, 71, 155},
-	{{228, 11, 144, 117, 206, 192, 118, 25, 141, 78, 4, 105, 0, 0, 0, 0}, 96, 142},
-	{{195, 67, 194, 229, 14, 53, 129, 7, 30, 208, 38, 100, 182, 59, 0, 0}, 112, 2},
-	{{25, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 59},
-	{{160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 112},
-	{{26, 203, 217, 152, 16, 187, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 48, 166},
-	{{250, 213, 14, 235, 110, 171, 174, 23, 102, 128, 0, 0, 0, 0, 0, 0}, 73, 62},
-	{{175, 230, 160, 13, 187, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 176},
-	{{92, 155, 156, 93, 191, 73, 28, 82, 187, 129, 57, 5, 16, 0, 0, 0}, 100, 6},
-	{{45, 203, 3, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 26},
-	{{120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 6},
-	{{216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 13},
-	{{135, 215, 0, 71, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 37, 41},
-	{{221, 149, 1, 40, 112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 135},
-	{{95, 143, 255, 194, 2, 157, 191, 113, 10, 229, 204, 56, 0, 0, 0, 0}, 93, 171},
-	{{202, 212, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 20},
-	{{147, 203, 238, 120, 194, 23, 25, 58, 208, 177, 169, 0, 0, 0, 0, 0}, 89, 119},
-	{{137, 170, 113, 252, 215, 194, 224, 146, 233, 87, 86, 192, 26, 46, 0, 0}, 112, 49},
-	{{224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 141},
-	{{250, 90, 241, 174, 163, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 132},
-	{{66, 190, 202, 144, 122, 86, 22, 103, 107, 164, 57, 54, 228, 128, 0, 0}, 105, 176},
-	{{76, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 12, 186},
-	{{120, 246, 1, 52, 187, 163, 78, 105, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 93},
-	{{137, 242, 136, 71, 98, 10, 53, 97, 160, 85, 132, 127, 185, 222, 0, 0}, 111, 242},
-	{{255, 133, 181, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 163},
-	{{128, 177, 92, 155, 91, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 184},
-	{{45, 120, 186, 192, 240, 199, 178, 95, 32, 0, 0, 0, 0, 0, 0, 0}, 68, 188},
-	{{151, 98, 103, 254, 90, 6, 10, 109, 14, 158, 69, 29, 140, 237, 40, 232}, 126, 193},
-	{{148, 164, 81, 85, 76, 14, 84, 64, 89, 176, 0, 0, 0, 0, 0, 0}, 78, 63},
-	{{145, 187, 165, 136, 88, 30, 107, 191, 205, 120, 119, 216, 158, 123, 64, 0}, 115, 160},
-	{{78, 120, 28, 243, 216, 180, 87, 19, 253, 16, 110, 33, 228, 24, 232, 0}, 117, 251},
-	{{74, 6, 166, 166, 183, 157, 96, 84, 151, 0, 0, 0, 0, 0, 0, 0}, 72, 228},
-	{{89, 96, 4, 221, 214, 253, 58, 49, 9, 0, 0, 0, 0, 0, 0, 0}, 72, 168},
-	{{97, 9, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 194},
-	{{213, 215, 45, 200, 170, 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 166},
-	{{5, 14, 92, 0, 28, 245, 130, 202, 32, 40, 207, 77, 166, 170, 246, 64}, 122, 210},
-	{{77, 45, 43, 71, 202, 0, 157, 146, 59, 91, 225, 0, 0, 0, 0, 0}, 89, 254},
-	{{101, 174, 94, 168, 162, 171, 71, 12, 16, 224, 0, 0, 0, 0, 0, 0}, 75, 49},
-	{{58, 17, 187, 194, 87, 73, 215, 103, 180, 12, 40, 66, 0, 0, 0, 0}, 96, 95},
-	{{160, 91, 68, 81, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 193},
-	{{94, 112, 249, 13, 167, 245, 101, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 155},
-	{{236, 194, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 15, 133},
-	{{168, 243, 103, 221, 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 10},
-	{{86, 194, 218, 188, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 31},
-	{{232, 3, 134, 67, 63, 196, 86, 14, 170, 243, 77, 134, 187, 140, 72, 18}, 127, 98},
-	{{55, 253, 19, 201, 199, 71, 229, 218, 54, 64, 12, 162, 0, 0, 0, 0}, 96, 22},
-	{{142, 34, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 20, 214},
-	{{213, 16, 208, 50, 100, 33, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 217},
-	{{117, 237, 132, 185, 184, 246, 79, 42, 103, 98, 162, 243, 128, 0, 0, 0}, 98, 102},
-	{{120, 25, 214, 222, 61, 157, 203, 102, 3, 146, 192, 0, 0, 0, 0, 0}, 83, 169},
-	{{222, 46, 254, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 152},
-	{{254, 70, 158, 171, 11, 245, 223, 97, 70, 17, 27, 192, 186, 0, 0, 0}, 103, 214},
-	{{192, 128, 228, 17, 68, 20, 44, 31, 52, 34, 212, 1, 224, 0, 0, 0}, 99, 178},
-	{{237, 229, 203, 8, 121, 176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 164},
-	{{6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 7, 15},
-	{{71, 197, 251, 122, 138, 232, 12, 241, 116, 240, 0, 0, 0, 0, 0, 0}, 76, 94},
-	{{18, 241, 135, 210, 233, 54, 121, 185, 4, 0, 0, 0, 0, 0, 0, 0}, 70, 239},
-	{{32, 50, 213, 63, 73, 217, 180, 21, 187, 128, 0, 0, 0, 0, 0, 0}, 73, 82},
-	{{203, 166, 233, 73, 92, 182, 212, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 54},
-	{{56, 162, 126, 4, 18, 195, 192, 64, 164, 156, 119, 196, 64, 0, 0, 0}, 98, 47},
-	{{120, 87, 81, 136, 180, 179, 68, 148, 243, 38, 80, 0, 0, 0, 0, 0}, 84, 214},
-	{{64, 244, 193, 50, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 37, 215},
-	{{91, 168, 253, 158, 131, 83, 159, 163, 113, 169, 112, 0, 0, 0, 0, 0}, 84, 153},
-	{{159, 103, 102, 132, 111, 46, 18, 77, 36, 15, 137, 33, 177, 31, 243, 192}, 122, 245},
-	{{123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 118},
-	{{67, 81, 226, 190, 7, 79, 71, 250, 155, 245, 44, 81, 215, 213, 171, 224}, 123, 128},
-	{{103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 7},
-	{{246, 44, 168, 200, 198, 238, 52, 196, 125, 115, 0, 0, 0, 0, 0, 0}, 80, 152},
-	{{205, 14, 186, 252, 239, 213, 59, 119, 105, 37, 140, 209, 4, 231, 0, 0}, 114, 248},
-	{{70, 91, 254, 106, 94, 71, 170, 19, 158, 242, 192, 0, 0, 0, 0, 0}, 85, 143},
-	{{250, 86, 233, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 159},
-	{{122, 222, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 11},
-	{{27, 224, 235, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 110},
-	{{239, 100, 224, 3, 46, 127, 150, 251, 204, 120, 228, 64, 0, 0, 0, 0}, 97, 181},
-	{{144, 115, 182, 206, 146, 13, 21, 111, 37, 70, 179, 129, 173, 82, 93, 128}, 121, 4},
-	{{73, 190, 57, 243, 49, 51, 15, 209, 0, 0, 0, 0, 0, 0, 0, 0}, 67, 101},
-	{{18, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 38},
-	{{23, 37, 236, 177, 186, 7, 209, 135, 114, 44, 0, 0, 0, 0, 0, 0}, 78, 57},
-	{{200, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 142},
-	{{181, 255, 153, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 184},
-	{{135, 168, 6, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 91},
-	{{200, 224, 33, 245, 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 224},
-	{{70, 111, 10, 62, 200, 224, 38, 204, 14, 164, 0, 0, 0, 0, 0, 0}, 78, 114},
-	{{158, 133, 252, 18, 242, 12, 16, 60, 5, 52, 251, 179, 38, 235, 12, 0}, 118, 184},
-	{{2, 23, 116, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 215},
-	{{33, 25, 170, 74, 215, 134, 151, 181, 175, 232, 20, 155, 189, 242, 13, 0}, 120, 167},
-	{{160, 186, 218, 183, 167, 84, 59, 152, 13, 137, 80, 128, 0, 0, 0, 0}, 89, 233},
-	{{32, 141, 196, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 101},
-	{{207, 24, 202, 226, 191, 136, 78, 124, 160, 0, 0, 0, 0, 0, 0, 0}, 67, 139},
-	{{210, 173, 172, 27, 197, 57, 114, 146, 169, 32, 0, 0, 0, 0, 0, 0}, 79, 32},
-	{{95, 113, 12, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 57},
-	{{129, 108, 186, 28, 19, 229, 96, 134, 199, 254, 199, 64, 0, 0, 0, 0}, 91, 151},
-	{{103, 226, 38, 123, 35, 199, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 0},
-	{{41, 117, 43, 35, 208, 115, 73, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 227},
-	{{42, 220, 61, 34, 199, 183, 42, 16, 223, 135, 0, 135, 213, 150, 100, 0}, 118, 124},
-	{{165, 227, 96, 243, 112, 171, 117, 106, 50, 37, 82, 60, 80, 0, 0, 0}, 104, 228},
-	{{158, 60, 111, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 64},
-	{{124, 108, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 25, 179},
-	{{232, 68, 132, 159, 156, 103, 95, 190, 76, 0, 0, 0, 0, 0, 0, 0}, 70, 107},
-	{{70, 77, 240, 209, 72, 63, 63, 45, 125, 79, 77, 41, 13, 0, 0, 0}, 104, 206},
-	{{146, 254, 7, 5, 68, 240, 67, 237, 112, 0, 0, 0, 0, 0, 0, 0}, 68, 95},
-	{{162, 223, 117, 27, 2, 156, 94, 170, 157, 114, 162, 50, 0, 0, 0, 0}, 96, 219},
-	{{161, 62, 191, 68, 239, 73, 100, 37, 168, 254, 139, 202, 252, 65, 74, 0}, 119, 138},
-	{{248, 122, 115, 81, 15, 158, 136, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 84},
-	{{8, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 161},
-	{{142, 96, 105, 133, 251, 57, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 25},
-	{{138, 196, 139, 131, 233, 93, 65, 242, 86, 169, 7, 72, 82, 128, 0, 0}, 107, 113},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 46},
-	{{175, 151, 75, 238, 26, 12, 100, 186, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 72},
-	{{82, 205, 211, 176, 170, 79, 57, 153, 161, 218, 32, 48, 0, 0, 0, 0}, 93, 230},
-	{{227, 123, 232, 74, 236, 202, 211, 121, 200, 8, 59, 189, 81, 219, 144, 0}, 117, 142},
-	{{205, 196, 89, 90, 103, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 134},
-	{{63, 145, 23, 127, 102, 216, 49, 36, 168, 164, 59, 133, 18, 146, 0, 0}, 112, 100},
-	{{213, 72, 154, 16, 230, 236, 218, 203, 223, 51, 31, 251, 103, 64, 0, 0}, 109, 45},
-	{{126, 148, 232, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 219},
-	{{160, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 52},
-	{{137, 38, 146, 20, 99, 188, 83, 123, 159, 159, 64, 0, 0, 0, 0, 0}, 83, 240},
-	{{123, 228, 36, 44, 242, 29, 51, 228, 140, 60, 237, 0, 0, 0, 0, 0}, 90, 13},
-	{{163, 169, 25, 89, 190, 114, 165, 158, 140, 210, 192, 0, 0, 0, 0, 0}, 84, 191},
-	{{225, 38, 70, 89, 218, 236, 60, 5, 69, 163, 248, 50, 163, 64, 0, 0}, 106, 95},
-	{{91, 94, 36, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 65},
-	{{209, 238, 110, 0, 2, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 195},
-	{{57, 17, 224, 164, 69, 95, 138, 172, 111, 55, 239, 167, 160, 0, 0, 0}, 103, 21},
-	{{112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 114},
-	{{102, 96, 223, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 92},
-	{{137, 204, 150, 75, 193, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 237},
-	{{136, 56, 252, 240, 85, 48, 248, 231, 17, 49, 47, 238, 15, 233, 159, 184}, 125, 172},
-	{{57, 31, 132, 123, 234, 255, 37, 82, 167, 204, 37, 158, 128, 0, 0, 0}, 98, 116},
-	{{55, 198, 139, 219, 161, 156, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 54},
-	{{44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 203},
-	{{53, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 74},
-	{{227, 62, 107, 236, 118, 156, 60, 34, 31, 179, 76, 221, 0, 0, 0, 0}, 96, 220},
-	{{105, 40, 240, 216, 91, 61, 19, 128, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 219},
-	{{96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 179},
-	{{118, 142, 251, 249, 128, 105, 113, 16, 0, 0, 0, 0, 0, 0, 0, 0}, 61, 194},
-	{{101, 70, 196, 238, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 187},
-	{{245, 173, 165, 177, 200, 161, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 79},
-	{{0, 198, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 87},
-	{{92, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 126},
-	{{125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 106},
-	{{56, 59, 35, 82, 101, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 96},
-	{{184, 72, 77, 251, 8, 166, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 45},
-	{{143, 74, 132, 205, 218, 247, 30, 160, 145, 199, 138, 12, 89, 220, 0, 0}, 110, 8},
-	{{30, 178, 111, 225, 73, 79, 173, 52, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 226},
-	{{224, 48, 154, 231, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 222},
-	{{123, 144, 170, 143, 85, 169, 130, 245, 214, 0, 0, 0, 0, 0, 0, 0}, 71, 218},
-	{{166, 224, 212, 100, 149, 55, 35, 210, 246, 108, 41, 245, 127, 174, 128, 0}, 116, 59},
-	{{75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 80},
-	{{197, 128, 190, 87, 47, 53, 92, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 177},
-	{{249, 10, 76, 217, 225, 20, 124, 205, 44, 159, 190, 8, 0, 0, 0, 0}, 98, 44},
-	{{180, 226, 0, 167, 137, 232, 174, 120, 113, 95, 22, 184, 0, 0, 0, 0}, 93, 206},
-	{{123, 153, 102, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 64},
-	{{5, 144, 206, 158, 239, 189, 171, 120, 69, 46, 128, 237, 0, 0, 0, 0}, 96, 236},
-	{{159, 235, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 101},
-	{{42, 194, 150, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 49},
-	{{205, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 179},
-	{{19, 65, 141, 20, 127, 77, 70, 205, 151, 115, 157, 23, 118, 128, 0, 0}, 109, 112},
-	{{96, 11, 214, 40, 245, 251, 61, 64, 128, 241, 183, 183, 0, 0, 0, 0}, 96, 31},
-	{{120, 4, 235, 112, 34, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 111},
-	{{110, 127, 207, 76, 100, 148, 130, 206, 249, 2, 104, 0, 0, 0, 0, 0}, 86, 65},
-	{{226, 190, 191, 249, 173, 96, 127, 200, 62, 20, 0, 0, 0, 0, 0, 0}, 78, 222},
-	{{89, 88, 182, 14, 78, 122, 213, 192, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 4},
-	{{167, 94, 163, 227, 28, 111, 117, 103, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 67},
-	{{57, 220, 53, 116, 243, 184, 242, 134, 16, 70, 83, 61, 161, 128, 0, 0}, 109, 197},
-	{{63, 235, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 121},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 167},
-	{{15, 159, 42, 167, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 140},
-	{{216, 252, 113, 40, 239, 46, 172, 48, 103, 250, 82, 179, 136, 64, 0, 0}, 106, 193},
-	{{158, 147, 16, 44, 124, 56, 44, 48, 138, 64, 169, 0, 0, 0, 0, 0}, 90, 47},
-	{{238, 238, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 187},
-	{{63, 159, 177, 162, 106, 212, 172, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 102},
-	{{59, 40, 252, 185, 187, 216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 237},
-	{{2, 218, 11, 68, 173, 196, 16, 223, 2, 18, 122, 215, 154, 0, 0, 0}, 103, 237},
-	{{3, 9, 206, 73, 108, 196, 183, 119, 141, 162, 10, 180, 115, 32, 0, 0}, 107, 115},
-	{{17, 227, 208, 146, 63, 201, 73, 239, 29, 79, 80, 0, 0, 0, 0, 0}, 84, 217},
-	{{115, 180, 176, 241, 52, 209, 6, 64, 189, 76, 0, 0, 0, 0, 0, 0}, 79, 21},
-	{{191, 88, 98, 245, 91, 46, 137, 254, 170, 80, 11, 55, 212, 28, 128, 0}, 113, 3},
-	{{97, 141, 171, 175, 22, 233, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 48, 62},
-	{{32, 204, 102, 191, 164, 242, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 80},
-	{{29, 133, 210, 252, 124, 66, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 184},
-	{{207, 179, 54, 144, 116, 67, 29, 64, 13, 199, 0, 0, 0, 0, 0, 0}, 80, 197},
-	{{129, 216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 63},
-	{{50, 152, 249, 143, 174, 234, 240, 48, 158, 255, 80, 105, 0, 0, 0, 0}, 99, 62},
-	{{105, 208, 95, 218, 44, 11, 87, 134, 109, 18, 138, 66, 17, 69, 128, 0}, 114, 231},
-	{{151, 79, 158, 220, 122, 101, 210, 164, 64, 0, 0, 0, 0, 0, 0, 0}, 67, 158},
-	{{236, 97, 87, 155, 254, 137, 122, 208, 168, 201, 194, 118, 224, 0, 0, 0}, 101, 118},
-	{{14, 229, 193, 248, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 237},
-	{{46, 154, 50, 80, 92, 147, 158, 86, 1, 112, 0, 0, 0, 0, 0, 0}, 79, 15},
-	{{88, 131, 21, 84, 62, 86, 7, 110, 142, 251, 242, 110, 194, 175, 247, 0}, 122, 84},
-	{{229, 216, 111, 92, 173, 32, 63, 70, 36, 84, 6, 74, 136, 166, 38, 0}, 119, 205},
-	{{121, 147, 216, 245, 37, 189, 146, 63, 145, 74, 128, 0, 0, 0, 0, 0}, 82, 220},
-	{{44, 26, 254, 11, 181, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 42},
-	{{209, 114, 97, 249, 227, 159, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 144},
-	{{184, 244, 43, 117, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 37, 74},
-	{{60, 81, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 89},
-	{{18, 40, 21, 113, 226, 91, 195, 88, 161, 19, 142, 0, 0, 0, 0, 0}, 88, 77},
-	{{57, 0, 212, 158, 56, 51, 108, 198, 59, 5, 137, 196, 0, 0, 0, 0}, 94, 2},
-	{{168, 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 75},
-	{{64, 181, 254, 103, 1, 230, 117, 199, 128, 0, 0, 0, 0, 0, 0, 0}, 65, 18},
-	{{212, 48, 214, 127, 78, 176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 246},
-	{{155, 185, 236, 163, 204, 49, 129, 120, 183, 47, 10, 243, 65, 92, 192, 0}, 114, 10},
-	{{94, 200, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 207},
-	{{19, 210, 136, 113, 73, 79, 132, 196, 224, 0, 0, 0, 0, 0, 0, 0}, 68, 41},
-	{{24, 203, 246, 242, 241, 223, 150, 237, 213, 202, 11, 128, 0, 0, 0, 0}, 89, 102},
-	{{115, 59, 171, 221, 172, 181, 170, 67, 115, 205, 44, 107, 162, 67, 56, 0}, 118, 118},
-	{{250, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 146},
-	{{203, 240, 28, 158, 182, 12, 86, 182, 142, 47, 143, 57, 239, 0, 0, 0}, 104, 122},
-	{{196, 218, 109, 52, 2, 0, 64, 153, 34, 250, 240, 185, 117, 0, 0, 0}, 107, 6},
-	{{137, 131, 191, 40, 72, 209, 74, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 18},
-	{{236, 126, 167, 37, 185, 20, 34, 207, 76, 0, 0, 0, 0, 0, 0, 0}, 70, 83},
-	{{129, 192, 245, 137, 251, 52, 75, 68, 81, 112, 146, 133, 64, 0, 0, 0}, 99, 90},
-	{{7, 31, 148, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 140},
-	{{112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 242},
-	{{167, 50, 202, 179, 74, 146, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 31},
-	{{44, 188, 186, 250, 229, 71, 28, 118, 35, 253, 245, 191, 199, 18, 0, 0}, 111, 9},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 230},
-	{{156, 163, 215, 175, 71, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 50},
-	{{67, 24, 151, 198, 242, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41, 34},
-	{{134, 107, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 11},
-	{{35, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 71},
-	{{46, 196, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 146},
-	{{82, 172, 8, 26, 154, 34, 125, 188, 5, 149, 159, 44, 78, 222, 236, 176}, 124, 249},
-	{{78, 157, 79, 70, 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 143},
-	{{231, 5, 210, 247, 198, 5, 157, 191, 206, 225, 149, 142, 207, 40, 0, 0}, 110, 17},
-	{{38, 254, 235, 199, 191, 60, 43, 159, 190, 243, 203, 185, 184, 218, 132, 0}, 119, 60},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 162},
-	{{95, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 12, 5},
-	{{17, 128, 244, 178, 160, 78, 83, 92, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 139},
-	{{18, 102, 62, 251, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 8},
-	{{30, 75, 108, 40, 231, 166, 233, 220, 163, 176, 252, 210, 60, 30, 128, 0}, 114, 246},
-	{{18, 3, 207, 64, 25, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 171},
-	{{52, 83, 235, 61, 164, 236, 83, 173, 143, 105, 14, 0, 0, 0, 0, 0}, 88, 206},
-	{{166, 175, 186, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 163},
-	{{221, 154, 82, 98, 41, 126, 85, 52, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 166},
-	{{94, 84, 182, 120, 204, 232, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 128},
-	{{27, 174, 227, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 59},
-	{{218, 12, 4, 156, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 179},
-	{{9, 5, 190, 195, 60, 216, 80, 150, 128, 117, 86, 128, 128, 112, 98, 208}, 124, 87},
-	{{7, 226, 104, 112, 212, 9, 172, 124, 209, 121, 170, 229, 44, 178, 128, 0}, 114, 29},
-	{{47, 71, 174, 76, 52, 83, 23, 18, 106, 48, 56, 32, 0, 0, 0, 0}, 91, 184},
-	{{51, 168, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 45},
-	{{28, 182, 167, 124, 28, 22, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 144},
-	{{34, 61, 14, 51, 253, 17, 19, 170, 49, 206, 188, 207, 247, 167, 192, 0}, 114, 119},
-	{{2, 235, 18, 14, 195, 66, 237, 30, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 113},
-	{{51, 182, 142, 133, 127, 96, 159, 132, 99, 161, 64, 0, 0, 0, 0, 0}, 82, 50},
-	{{170, 145, 230, 123, 215, 189, 73, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 207},
-	{{151, 166, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 3},
-	{{16, 141, 196, 129, 132, 207, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 13},
-	{{205, 25, 184, 191, 201, 206, 109, 224, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 42},
-	{{48, 114, 33, 103, 247, 255, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 31},
-	{{179, 156, 119, 146, 125, 21, 42, 146, 237, 213, 191, 132, 0, 0, 0, 0}, 94, 30},
-	{{179, 129, 186, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 94},
-	{{17, 179, 217, 188, 128, 212, 4, 4, 152, 0, 0, 0, 0, 0, 0, 0}, 71, 190},
-	{{132, 63, 74, 89, 209, 64, 63, 192, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 238},
-	{{16, 50, 248, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 20},
-	{{189, 96, 58, 53, 191, 235, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 84},
-	{{111, 98, 6, 65, 35, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 108},
-	{{118, 223, 83, 220, 110, 122, 23, 112, 185, 155, 73, 0, 0, 0, 0, 0}, 89, 136},
-	{{173, 191, 150, 197, 204, 35, 169, 79, 31, 214, 251, 240, 0, 0, 0, 0}, 93, 196},
-	{{26, 76, 129, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 67},
-	{{231, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 12, 104},
-	{{93, 172, 223, 252, 203, 0, 206, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 15},
-	{{53, 142, 203, 124, 104, 51, 241, 12, 161, 17, 101, 245, 120, 110, 192, 199}, 128, 237},
-	{{9, 77, 120, 197, 193, 10, 237, 174, 233, 2, 165, 11, 229, 47, 144, 0}, 116, 224},
-	{{99, 161, 189, 88, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 179},
-	{{18, 8, 76, 66, 2, 185, 206, 132, 224, 0, 0, 0, 0, 0, 0, 0}, 67, 84},
-	{{169, 53, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 65},
-	{{136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 178},
-	{{131, 162, 144, 124, 12, 98, 242, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 154},
-	{{75, 50, 129, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 27, 106},
-	{{212, 183, 40, 225, 152, 136, 174, 91, 0, 0, 0, 0, 0, 0, 0, 0}, 67, 125},
-	{{158, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 118},
-	{{7, 48, 132, 149, 169, 212, 198, 137, 202, 0, 0, 0, 0, 0, 0, 0}, 73, 52},
-	{{173, 195, 129, 163, 141, 249, 40, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 173},
-	{{109, 79, 75, 219, 205, 182, 22, 245, 223, 17, 146, 78, 109, 119, 128, 0}, 113, 8},
-	{{174, 195, 24, 182, 215, 198, 214, 86, 34, 128, 0, 0, 0, 0, 0, 0}, 74, 211},
-	{{22, 40, 51, 109, 70, 91, 152, 56, 0, 0, 0, 0, 0, 0, 0, 0}, 61, 253},
-	{{169, 115, 246, 126, 65, 118, 219, 192, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 47},
-	{{154, 37, 70, 124, 107, 123, 232, 241, 164, 142, 71, 226, 182, 126, 0, 0}, 112, 73},
-	{{6, 108, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 192},
-	{{216, 167, 158, 158, 222, 19, 96, 28, 40, 6, 70, 12, 147, 27, 85, 240}, 128, 55},
-	{{72, 222, 52, 69, 69, 206, 163, 106, 235, 206, 80, 128, 0, 0, 0, 0}, 94, 147},
-	{{150, 112, 106, 56, 15, 243, 154, 97, 134, 110, 160, 20, 183, 144, 234, 8}, 125, 86},
-	{{58, 186, 106, 58, 124, 171, 53, 85, 33, 100, 64, 0, 0, 0, 0, 0}, 82, 16},
-	{{7, 195, 22, 31, 62, 217, 209, 46, 90, 49, 189, 50, 168, 126, 0, 0}, 111, 167},
-	{{92, 44, 159, 198, 185, 94, 231, 177, 64, 0, 0, 0, 0, 0, 0, 0}, 67, 148},
-	{{169, 108, 190, 162, 23, 39, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 66},
-	{{161, 5, 3, 11, 158, 157, 166, 212, 246, 22, 140, 101, 92, 0, 0, 0}, 104, 70},
-	{{71, 191, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 166},
-	{{48, 136, 194, 145, 57, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 109},
-	{{144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 226},
-	{{223, 209, 10, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 8},
-	{{154, 79, 170, 9, 43, 139, 249, 176, 186, 72, 216, 0, 0, 0, 0, 0}, 85, 218},
-	{{1, 8, 123, 205, 167, 134, 128, 102, 10, 72, 0, 0, 0, 0, 0, 0}, 78, 54},
-	{{31, 105, 48, 77, 103, 187, 99, 67, 96, 0, 0, 0, 0, 0, 0, 0}, 67, 48},
-	{{14, 73, 54, 76, 232, 35, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 244},
-	{{14, 109, 251, 190, 36, 253, 99, 120, 94, 64, 0, 0, 0, 0, 0, 0}, 74, 50},
-	{{122, 170, 9, 134, 124, 91, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 173},
-	{{246, 10, 85, 88, 82, 217, 95, 56, 216, 203, 160, 0, 0, 0, 0, 0}, 84, 245},
-	{{77, 100, 114, 207, 150, 177, 69, 134, 74, 131, 147, 117, 177, 64, 210, 128}, 121, 54},
-	{{171, 123, 22, 138, 132, 229, 250, 81, 186, 227, 146, 27, 170, 205, 128, 0}, 113, 86},
-	{{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 115},
-	{{12, 35, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 144},
-	{{255, 124, 179, 165, 169, 250, 66, 171, 223, 125, 247, 0, 0, 0, 0, 0}, 89, 171},
-	{{244, 235, 211, 10, 251, 255, 206, 6, 198, 12, 50, 136, 0, 0, 0, 0}, 93, 231},
-	{{221, 77, 237, 41, 50, 33, 103, 24, 25, 127, 208, 0, 0, 0, 0, 0}, 88, 34},
-	{{216, 69, 47, 53, 117, 24, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 225},
-	{{180, 87, 25, 236, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 174},
-	{{110, 32, 24, 34, 116, 133, 245, 128, 123, 95, 125, 122, 100, 129, 128, 0}, 113, 37},
-	{{27, 117, 179, 112, 133, 137, 110, 193, 246, 201, 219, 65, 56, 234, 106, 128}, 121, 39},
-	{{186, 117, 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 23, 59},
-	{{243, 119, 54, 16, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 96},
-	{{160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 147},
-	{{78, 48, 117, 200, 245, 118, 115, 240, 170, 125, 84, 103, 33, 168, 0, 0}, 110, 56},
-	{{201, 253, 184, 254, 143, 81, 95, 42, 243, 147, 96, 145, 23, 26, 0, 0}, 111, 234},
-	{{41, 215, 84, 136, 234, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 199},
-	{{91, 244, 137, 184, 231, 95, 135, 10, 184, 0, 0, 0, 0, 0, 0, 0}, 69, 191},
-	{{113, 31, 181, 245, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 235},
-	{{181, 216, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 20, 45},
-	{{87, 26, 119, 229, 97, 255, 9, 43, 32, 0, 0, 0, 0, 0, 0, 0}, 67, 164},
-	{{205, 112, 67, 163, 196, 148, 5, 105, 8, 138, 144, 3, 171, 213, 159, 128}, 121, 130},
-	{{136, 27, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 166},
-	{{2, 175, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 140},
-	{{222, 131, 85, 218, 16, 229, 44, 230, 243, 76, 250, 139, 1, 203, 108, 0}, 118, 47},
-	{{101, 180, 77, 142, 194, 73, 196, 246, 107, 100, 194, 72, 204, 124, 0, 0}, 111, 148},
-	{{96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 103},
-	{{46, 62, 191, 130, 110, 128, 235, 62, 68, 39, 58, 152, 207, 204, 96, 0}, 116, 94},
-	{{111, 11, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 85},
-	{{58, 43, 14, 93, 102, 210, 117, 208, 222, 171, 130, 41, 16, 16, 0, 0}, 109, 250},
-	{{141, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 153},
-	{{170, 153, 160, 170, 144, 235, 122, 8, 106, 34, 24, 32, 102, 57, 12, 168}, 125, 182},
-	{{34, 113, 163, 107, 61, 177, 39, 172, 242, 2, 130, 0, 0, 0, 0, 0}, 94, 23},
-	{{222, 191, 239, 110, 162, 191, 195, 181, 80, 50, 85, 240, 88, 32, 0, 0}, 108, 38},
-	{{179, 82, 253, 151, 212, 0, 72, 253, 175, 22, 34, 78, 53, 32, 0, 0}, 110, 121},
-	{{10, 162, 20, 46, 164, 64, 88, 1, 202, 204, 124, 0, 0, 0, 0, 0}, 87, 146},
-	{{210, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 138},
-	{{183, 200, 1, 2, 51, 6, 66, 142, 20, 77, 48, 244, 0, 0, 0, 0}, 94, 149},
-	{{29, 20, 224, 57, 204, 161, 131, 254, 53, 133, 163, 0, 0, 0, 0, 0}, 88, 232},
-	{{75, 58, 170, 52, 146, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 255},
-	{{92, 21, 1, 113, 185, 88, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 148},
-	{{103, 180, 222, 187, 129, 117, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 48, 117},
-	{{32, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 237},
-	{{7, 60, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 113},
-	{{167, 122, 205, 185, 21, 199, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 162},
-	{{21, 78, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 225},
-	{{92, 159, 167, 169, 136, 176, 95, 255, 87, 137, 112, 16, 0, 0, 0, 0}, 92, 210},
-	{{84, 120, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 34},
-	{{126, 5, 126, 176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 224},
-	{{4, 58, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 143},
-	{{239, 154, 181, 182, 189, 211, 244, 53, 144, 0, 0, 0, 0, 0, 0, 0}, 68, 216},
-	{{254, 188, 139, 167, 135, 47, 147, 239, 187, 106, 228, 156, 234, 234, 102, 0}, 120, 239},
-	{{225, 168, 138, 92, 193, 255, 47, 233, 11, 154, 205, 86, 209, 88, 0, 0}, 111, 54},
-	{{223, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 35},
-	{{235, 252, 115, 10, 151, 104, 193, 207, 38, 228, 229, 245, 42, 13, 108, 0}, 119, 230},
-	{{1, 137, 53, 36, 210, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 234},
-	{{149, 182, 72, 197, 92, 229, 9, 10, 220, 128, 72, 19, 4, 58, 192, 0}, 115, 70},
-	{{105, 73, 57, 108, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 246},
-	{{189, 61, 230, 24, 235, 82, 58, 102, 97, 111, 121, 252, 156, 94, 191, 166}, 127, 217},
-	{{193, 108, 231, 86, 140, 14, 192, 4, 135, 80, 129, 166, 158, 61, 230, 20}, 128, 201},
-	{{110, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 17, 49},
-	{{3, 102, 36, 231, 15, 242, 143, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 2},
-	{{81, 189, 220, 168, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 64},
-	{{168, 75, 133, 180, 91, 165, 77, 232, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 239},
-	{{106, 179, 186, 109, 81, 234, 233, 167, 101, 160, 90, 102, 174, 234, 208, 0}, 116, 47},
-	{{46, 105, 234, 21, 23, 247, 169, 33, 47, 5, 0, 0, 0, 0, 0, 0}, 80, 43},
-	{{152, 144, 100, 142, 129, 23, 227, 50, 67, 81, 249, 116, 0, 0, 0, 0}, 94, 17},
-	{{109, 74, 145, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 5},
-	{{100, 243, 22, 230, 38, 44, 128, 86, 132, 57, 0, 0, 0, 0, 0, 0}, 81, 240},
-	{{153, 251, 115, 65, 104, 179, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 197},
-	{{43, 113, 60, 224, 36, 20, 42, 161, 24, 223, 192, 0, 0, 0, 0, 0}, 84, 192},
-	{{61, 77, 121, 176, 138, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 160},
-	{{119, 194, 146, 49, 59, 242, 25, 220, 122, 104, 80, 0, 0, 0, 0, 0}, 84, 199},
-	{{254, 162, 155, 47, 187, 3, 1, 114, 142, 191, 152, 44, 144, 26, 202, 0}, 127, 217},
-	{{176, 1, 114, 42, 191, 145, 43, 1, 141, 18, 64, 0, 0, 0, 0, 0}, 83, 75},
-	{{170, 244, 67, 132, 145, 163, 76, 213, 85, 237, 248, 22, 207, 64, 0, 0}, 106, 222},
-	{{102, 190, 58, 32, 75, 15, 89, 163, 64, 7, 168, 0, 0, 0, 0, 0}, 85, 39},
-	{{124, 170, 35, 47, 152, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 9},
-	{{192, 221, 20, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 217},
-	{{208, 178, 136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 25, 142},
-	{{188, 68, 77, 30, 68, 153, 102, 180, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 18},
-	{{114, 178, 121, 188, 205, 233, 35, 77, 34, 197, 158, 174, 101, 0, 0, 0}, 104, 180},
-	{{195, 98, 67, 12, 13, 43, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 205},
-	{{146, 190, 42, 222, 14, 54, 28, 128, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 251},
-	{{185, 202, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 178},
-	{{138, 30, 129, 95, 224, 161, 120, 128, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 198},
-	{{69, 181, 5, 227, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 84},
-	{{90, 180, 0, 164, 227, 75, 174, 119, 128, 0, 0, 0, 0, 0, 0, 0}, 66, 128},
-	{{20, 60, 58, 119, 245, 177, 162, 186, 13, 112, 211, 239, 128, 0, 0, 0}, 97, 75},
-	{{158, 124, 157, 25, 230, 139, 51, 212, 76, 109, 236, 210, 48, 0, 0, 0}, 101, 192},
-	{{125, 108, 242, 36, 94, 13, 36, 106, 90, 51, 83, 217, 131, 151, 0, 0}, 114, 60},
-	{{222, 218, 162, 158, 15, 53, 191, 178, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 169},
-	{{104, 202, 127, 109, 73, 16, 17, 12, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 10},
-	{{172, 171, 246, 26, 176, 34, 22, 152, 246, 56, 173, 120, 105, 60, 92, 0}, 118, 64},
-	{{190, 22, 171, 206, 109, 186, 179, 128, 253, 182, 108, 212, 220, 167, 171, 180}, 127, 182},
-	{{119, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 29},
-	{{160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 39},
-	{{170, 144, 64, 2, 107, 166, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 93},
-	{{234, 9, 96, 20, 156, 157, 1, 34, 88, 0, 0, 0, 0, 0, 0, 0}, 75, 228},
-	{{147, 237, 16, 120, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 236},
-	{{182, 189, 162, 158, 223, 90, 173, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 190},
-	{{116, 148, 142, 240, 10, 253, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 217},
-	{{211, 73, 140, 69, 252, 27, 75, 46, 37, 6, 147, 32, 0, 0, 0, 0}, 93, 74},
-	{{148, 61, 120, 49, 220, 65, 150, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 61, 180},
-	{{172, 35, 202, 180, 129, 75, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 91},
-	{{215, 109, 147, 157, 32, 28, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 230},
-	{{151, 26, 182, 112, 205, 220, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 175},
-	{{73, 91, 93, 61, 196, 3, 66, 26, 149, 96, 0, 0, 0, 0, 0, 0}, 75, 171},
-	{{203, 163, 52, 247, 28, 119, 56, 223, 138, 70, 174, 97, 77, 59, 46, 0}, 120, 202},
-	{{251, 50, 228, 178, 202, 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 113},
-	{{217, 159, 164, 199, 14, 237, 170, 184, 100, 231, 92, 222, 0, 0, 0, 0}, 96, 187},
-	{{16, 161, 85, 193, 202, 21, 3, 155, 63, 116, 124, 203, 34, 13, 215, 0}, 120, 38},
-	{{111, 52, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 35},
-	{{69, 12, 116, 151, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 115},
-	{{187, 60, 97, 40, 112, 101, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 18},
-	{{230, 194, 136, 255, 206, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 34},
-	{{179, 239, 170, 107, 3, 13, 212, 67, 177, 69, 8, 0, 0, 0, 0, 0}, 87, 75},
-	{{11, 58, 130, 89, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 232},
-	{{217, 178, 43, 203, 234, 20, 234, 186, 157, 88, 146, 192, 0, 0, 0, 0}, 91, 154},
-	{{6, 180, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 195},
-	{{157, 154, 218, 158, 39, 224, 103, 230, 164, 0, 0, 0, 0, 0, 0, 0}, 70, 122},
-	{{225, 10, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 97},
-	{{16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 220},
-	{{166, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 7, 80},
-	{{29, 190, 131, 215, 232, 246, 41, 226, 52, 192, 0, 0, 0, 0, 0, 0}, 77, 133},
-	{{138, 74, 163, 93, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 38, 93},
-	{{229, 64, 97, 41, 28, 243, 249, 185, 97, 35, 49, 27, 175, 24, 0, 0}, 110, 176},
-	{{6, 73, 94, 160, 186, 216, 84, 117, 233, 169, 146, 234, 0, 0, 0, 0}, 95, 68},
-	{{163, 40, 242, 81, 224, 35, 72, 194, 176, 78, 224, 174, 12, 0, 0, 0}, 103, 247},
-	{{2, 205, 40, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 240},
-	{{174, 225, 240, 160, 212, 8, 246, 67, 36, 0, 0, 0, 0, 0, 0, 0}, 74, 83},
-	{{5, 117, 182, 141, 166, 249, 196, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 132},
-	{{46, 152, 169, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 217},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 214},
-	{{233, 202, 159, 219, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 193},
-	{{172, 54, 159, 5, 14, 245, 106, 182, 2, 0, 0, 0, 0, 0, 0, 0}, 71, 61},
-	{{241, 222, 251, 114, 200, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 65},
-	{{31, 243, 190, 4, 207, 198, 249, 59, 167, 127, 93, 64, 0, 0, 0, 0}, 91, 108},
-	{{201, 35, 222, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 244},
-	{{187, 105, 13, 114, 238, 197, 145, 23, 169, 116, 91, 28, 0, 0, 0, 0}, 95, 194},
-	{{251, 251, 121, 168, 152, 178, 147, 188, 229, 123, 154, 242, 190, 165, 173, 48}, 124, 82},
-	{{66, 187, 191, 164, 31, 196, 40, 186, 148, 115, 134, 57, 222, 254, 48, 0}, 116, 45},
-	{{209, 17, 111, 41, 154, 94, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 48, 224},
-	{{40, 245, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 17},
-	{{72, 121, 151, 83, 170, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 133},
-	{{171, 172, 101, 238, 201, 148, 23, 81, 4, 11, 64, 0, 0, 0, 0, 0}, 85, 125},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 42},
-	{{20, 46, 27, 93, 195, 184, 6, 162, 109, 225, 22, 152, 0, 0, 0, 0}, 96, 140},
-	{{243, 122, 30, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 91},
-	{{89, 250, 80, 72, 148, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 48, 92},
-	{{187, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 125},
-	{{172, 160, 143, 114, 128, 239, 174, 133, 176, 154, 159, 134, 10, 0, 0, 0}, 106, 249},
-	{{254, 202, 113, 112, 173, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 202},
-	{{80, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 107},
-	{{222, 216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 13, 124},
-	{{219, 138, 253, 12, 188, 197, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 57},
-	{{124, 41, 173, 8, 202, 192, 61, 254, 174, 48, 239, 112, 0, 0, 0, 0}, 92, 181},
-	{{195, 236, 245, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 107},
-	{{83, 82, 42, 244, 136, 191, 197, 81, 91, 154, 216, 85, 29, 150, 198, 22}, 128, 101},
-	{{112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 102},
-	{{44, 30, 219, 248, 214, 88, 225, 132, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 136},
-	{{41, 171, 206, 178, 195, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 114},
-	{{159, 15, 180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 215},
-	{{42, 188, 37, 174, 86, 40, 4, 84, 174, 216, 0, 0, 0, 0, 0, 0}, 79, 249},
-	{{185, 227, 85, 177, 219, 95, 250, 227, 69, 154, 118, 0, 0, 0, 0, 0}, 88, 29},
-	{{22, 185, 238, 100, 25, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 44, 71},
-	{{122, 149, 117, 77, 88, 250, 187, 203, 136, 22, 85, 42, 105, 234, 79, 8}, 127, 112},
-	{{93, 152, 229, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 31, 72},
-	{{129, 37, 165, 167, 241, 24, 37, 40, 2, 128, 0, 0, 0, 0, 0, 0}, 73, 155},
-	{{30, 202, 177, 3, 253, 202, 164, 248, 0, 0, 0, 0, 0, 0, 0, 0}, 61, 66},
-	{{176, 25, 220, 120, 194, 228, 10, 45, 225, 142, 192, 96, 0, 0, 0, 0}, 91, 77},
-	{{96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 109},
-	{{82, 56, 12, 204, 61, 45, 147, 240, 221, 0, 0, 0, 0, 0, 0, 0}, 72, 37},
-	{{242, 38, 240, 41, 140, 75, 250, 37, 175, 115, 97, 224, 0, 0, 0, 0}, 91, 56},
-	{{251, 192, 23, 90, 135, 56, 252, 56, 79, 219, 80, 167, 22, 0, 0, 0}, 103, 5},
-	{{62, 128, 139, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 25, 15},
-	{{214, 1, 84, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 183},
-	{{207, 90, 237, 137, 171, 140, 227, 88, 250, 26, 197, 162, 163, 0, 0, 0}, 105, 171},
-	{{196, 151, 235, 232, 114, 248, 1, 207, 193, 184, 186, 71, 157, 0, 0, 0}, 112, 202},
-	{{152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 136},
-	{{9, 174, 211, 200, 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 37, 107},
-	{{89, 150, 95, 28, 209, 13, 125, 159, 254, 244, 110, 0, 0, 0, 0, 0}, 87, 193},
-	{{23, 28, 202, 10, 90, 158, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 4},
-	{{48, 25, 180, 9, 84, 236, 6, 144, 30, 198, 41, 56, 0, 0, 0, 0}, 96, 68},
-	{{252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 7, 40},
-	{{20, 165, 57, 130, 164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 255},
-	{{167, 56, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 108},
-	{{91, 204, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 219},
-	{{24, 46, 9, 4, 170, 150, 56, 130, 127, 120, 118, 104, 168, 48, 0, 0}, 108, 12},
-	{{156, 60, 245, 247, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 84},
-	{{148, 104, 187, 174, 129, 28, 127, 162, 92, 222, 52, 18, 0, 0, 0, 0}, 96, 33},
-	{{38, 253, 182, 153, 233, 194, 159, 41, 94, 193, 254, 160, 0, 0, 0, 0}, 91, 199},
-	{{156, 77, 105, 235, 145, 216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 52},
-	{{100, 211, 238, 147, 65, 222, 99, 73, 252, 113, 46, 113, 52, 136, 0, 0}, 113, 184},
-	{{13, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 124},
-	{{29, 240, 141, 230, 78, 237, 25, 135, 131, 6, 65, 77, 77, 248, 0, 0}, 109, 128},
-	{{15, 192, 109, 31, 149, 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 255},
-	{{80, 185, 170, 71, 41, 58, 158, 106, 253, 7, 2, 184, 173, 0, 0, 0}, 105, 146},
-	{{16, 229, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 24, 172},
-	{{169, 2, 153, 9, 169, 203, 245, 154, 184, 0, 0, 0, 0, 0, 0, 0}, 70, 116},
-	{{144, 135, 239, 164, 142, 187, 64, 109, 0, 0, 0, 0, 0, 0, 0, 0}, 66, 189},
-	{{170, 78, 252, 227, 242, 199, 130, 251, 200, 0, 0, 0, 0, 0, 0, 0}, 70, 10},
-	{{232, 18, 15, 126, 166, 126, 58, 25, 209, 62, 76, 79, 0, 0, 0, 0}, 98, 184},
-	{{170, 82, 72, 53, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 33, 98},
-	{{152, 100, 37, 122, 242, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 37},
-	{{174, 231, 230, 33, 71, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 174},
-	{{74, 225, 252, 153, 202, 8, 162, 39, 64, 0, 0, 0, 0, 0, 0, 0}, 67, 251},
-	{{167, 186, 101, 187, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 115},
-	{{83, 7, 21, 122, 243, 67, 171, 146, 145, 160, 168, 103, 223, 64, 0, 0}, 107, 252},
-	{{83, 132, 219, 86, 86, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 176},
-	{{22, 113, 72, 102, 73, 16, 236, 57, 197, 122, 31, 0, 0, 0, 0, 0}, 91, 155},
-	{{250, 59, 64, 35, 72, 112, 159, 85, 200, 5, 193, 39, 152, 185, 148, 16}, 124, 36},
-	{{220, 21, 48, 164, 224, 121, 17, 69, 10, 118, 106, 0, 0, 0, 0, 0}, 88, 202},
-	{{160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 5, 208},
-	{{247, 64, 83, 125, 195, 225, 50, 76, 18, 104, 0, 0, 0, 0, 0, 0}, 77, 158},
-	{{78, 91, 31, 202, 189, 25, 13, 133, 220, 0, 0, 0, 0, 0, 0, 0}, 72, 136},
-	{{105, 197, 26, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 191},
-	{{14, 31, 154, 242, 241, 231, 55, 151, 223, 56, 134, 255, 113, 206, 69, 0}, 120, 126},
-	{{247, 193, 58, 176, 16, 71, 31, 120, 213, 104, 231, 83, 26, 118, 91, 135}, 128, 139},
-	{{136, 32, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 25, 216},
-	{{100, 238, 112, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 29, 93},
-	{{80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 196},
-	{{233, 224, 254, 57, 33, 205, 140, 217, 181, 72, 0, 0, 0, 0, 0, 0}, 81, 119},
-	{{107, 75, 65, 158, 128, 142, 191, 188, 188, 240, 148, 243, 116, 0, 0, 0}, 104, 93},
-	{{39, 70, 120, 114, 69, 237, 95, 48, 233, 176, 91, 154, 0, 0, 0, 0}, 96, 183},
-	{{10, 61, 43, 101, 64, 102, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 207},
-	{{151, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 102},
-	{{210, 241, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 19, 36},
-	{{52, 222, 249, 31, 108, 137, 199, 1, 242, 173, 184, 144, 0, 0, 0, 0}, 93, 41},
-	{{123, 111, 88, 192, 69, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 70},
-	{{180, 82, 188, 125, 140, 8, 196, 74, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 218},
-	{{77, 158, 34, 101, 196, 102, 56, 220, 42, 143, 181, 187, 240, 64, 161, 0}, 120, 226},
-	{{88, 220, 222, 38, 23, 108, 5, 148, 185, 110, 20, 14, 67, 61, 0, 0}, 114, 25},
-	{{90, 65, 220, 165, 197, 133, 110, 92, 228, 19, 2, 17, 0, 0, 0, 0}, 98, 6},
-	{{35, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 26},
-	{{103, 123, 49, 209, 228, 229, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 149},
-	{{50, 244, 58, 191, 95, 156, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 46, 127},
-	{{140, 169, 75, 77, 78, 86, 40, 16, 0, 0, 0, 0, 0, 0, 0, 0}, 62, 144},
-	{{99, 176, 175, 83, 114, 50, 214, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 56, 213},
-	{{19, 208, 211, 76, 85, 176, 247, 64, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 115},
-	{{153, 28, 188, 113, 211, 116, 7, 178, 136, 205, 96, 0, 0, 0, 0, 0}, 83, 146},
-	{{160, 180, 220, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 58},
-	{{234, 6, 112, 19, 61, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 222},
-	{{97, 110, 34, 117, 149, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 16},
-	{{99, 173, 119, 73, 250, 30, 144, 30, 128, 0, 0, 0, 0, 0, 0, 0}, 65, 169},
-	{{169, 134, 111, 89, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 175},
-	{{134, 80, 227, 43, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 3},
-	{{231, 243, 35, 80, 75, 207, 128, 137, 54, 170, 71, 238, 0, 0, 0, 0}, 96, 2},
-	{{189, 190, 121, 135, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 193},
-	{{143, 155, 216, 193, 239, 205, 204, 153, 143, 236, 69, 23, 200, 211, 0, 0}, 118, 151},
-	{{32, 1, 115, 244, 33, 219, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 182},
-	{{220, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 148},
-	{{206, 87, 135, 235, 116, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 42, 53},
-	{{152, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 11, 87},
-	{{58, 146, 188, 233, 230, 236, 192, 214, 168, 128, 0, 0, 0, 0, 0, 0}, 73, 235},
-	{{84, 220, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 23, 51},
-	{{106, 145, 142, 42, 186, 186, 58, 1, 48, 98, 165, 131, 48, 156, 192, 0}, 116, 11},
-	{{53, 219, 120, 242, 166, 214, 81, 130, 64, 0, 0, 0, 0, 0, 0, 0}, 68, 28},
-	{{240, 120, 76, 163, 32, 197, 181, 251, 98, 220, 29, 226, 0, 0, 0, 0}, 96, 73},
-	{{234, 197, 12, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 216},
-	{{191, 94, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 16, 99},
-	{{200, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 35},
-	{{29, 129, 47, 83, 19, 75, 158, 1, 28, 24, 26, 147, 82, 119, 140, 100}, 127, 195},
-	{{241, 174, 26, 53, 152, 112, 200, 134, 84, 187, 177, 176, 42, 64, 0, 0}, 108, 176},
-	{{77, 171, 145, 48, 195, 84, 190, 36, 122, 199, 18, 0, 0, 0, 0, 0}, 87, 217},
-	{{105, 104, 135, 53, 226, 118, 238, 169, 9, 253, 132, 162, 217, 123, 191, 96}, 126, 244},
-	{{160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 125},
-	{{41, 85, 143, 128, 91, 137, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 219},
-	{{116, 110, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 165},
-	{{75, 213, 44, 16, 43, 157, 34, 171, 98, 117, 109, 151, 5, 60, 224, 0}, 117, 6},
-	{{229, 23, 116, 61, 80, 139, 200, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 53, 47},
-	{{83, 123, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 23, 73},
-	{{151, 243, 45, 217, 216, 158, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 47, 98},
-	{{171, 184, 110, 211, 237, 114, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 52, 21},
-	{{7, 246, 199, 119, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 32, 142},
-	{{103, 47, 70, 17, 31, 232, 44, 75, 145, 155, 100, 216, 0, 0, 0, 0}, 93, 34},
-	{{65, 170, 169, 100, 167, 147, 142, 251, 20, 64, 0, 0, 0, 0, 0, 0}, 74, 41},
-	{{235, 6, 229, 248, 151, 137, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55, 80},
-	{{156, 39, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 22, 11},
-	{{92, 188, 82, 192, 142, 249, 190, 128, 0, 0, 0, 0, 0, 0, 0, 0}, 58, 254},
-	{{253, 218, 181, 46, 134, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 45, 95},
-	{{189, 19, 31, 244, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 40, 8},
-	{{30, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 14, 212},
-	{{81, 226, 13, 173, 79, 123, 223, 124, 108, 80, 83, 238, 0, 0, 0, 0}, 95, 217},
-	{{126, 211, 206, 82, 147, 215, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 15},
-	{{42, 229, 135, 197, 196, 243, 94, 181, 133, 34, 16, 0, 0, 0, 0, 0}, 84, 66},
-	{{68, 210, 158, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 122},
-	{{183, 63, 223, 94, 81, 41, 203, 20, 236, 212, 220, 199, 0, 0, 0, 0}, 97, 12},
-	{{131, 146, 2, 125, 174, 43, 231, 20, 194, 0, 0, 0, 0, 0, 0, 0}, 71, 171},
-	{{31, 180, 246, 158, 28, 192, 236, 39, 237, 55, 74, 195, 171, 192, 0, 0}, 106, 42},
-	{{179, 10, 70, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 28, 194},
-	{{147, 51, 85, 185, 234, 209, 236, 87, 147, 17, 7, 68, 148, 32, 0, 0}, 107, 237},
-	{{177, 178, 6, 40, 46, 166, 87, 198, 214, 234, 23, 224, 0, 0, 0, 0}, 93, 151},
-	{{201, 53, 40, 20, 49, 4, 38, 139, 133, 217, 214, 134, 89, 200, 0, 0}, 109, 238},
-	{{4, 26, 181, 37, 206, 129, 233, 32, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 128},
-	{{81, 58, 248, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 227},
-	{{18, 238, 250, 161, 57, 246, 208, 118, 14, 76, 73, 25, 65, 22, 152, 120}, 127, 138},
-	{{31, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 60},
-	{{115, 195, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 18, 148},
-	{{116, 22, 75, 33, 16, 129, 35, 124, 10, 112, 31, 213, 181, 108, 177, 46}, 128, 129},
-	{{117, 214, 20, 80, 83, 51, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 202},
-	{{120, 75, 124, 149, 120, 123, 242, 151, 181, 164, 128, 0, 0, 0, 0, 0}, 81, 88},
-	{{87, 238, 168, 62, 88, 166, 52, 104, 219, 169, 93, 128, 0, 0, 0, 0}, 90, 3},
-	{{237, 44, 224, 146, 52, 85, 245, 192, 65, 137, 37, 95, 156, 176, 0, 0}, 108, 243},
-	{{214, 241, 51, 63, 73, 61, 193, 165, 23, 108, 0, 0, 0, 0, 0, 0}, 80, 95},
-	{{87, 242, 21, 157, 45, 188, 36, 62, 66, 243, 64, 0, 0, 0, 0, 0}, 87, 255},
-	{{0, 97, 220, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 48},
-	{{227, 206, 189, 31, 222, 8, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 50, 38},
-	{{174, 27, 0, 16, 13, 150, 33, 122, 154, 59, 236, 35, 248, 178, 64, 0}, 115, 20},
-	{{39, 20, 125, 69, 252, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 43, 41},
-	{{141, 232, 1, 12, 125, 229, 168, 14, 125, 116, 180, 0, 0, 0, 0, 0}, 92, 133},
-	{{93, 238, 40, 228, 254, 203, 251, 6, 60, 82, 243, 242, 0, 0, 0, 0}, 95, 189},
-	{{44, 115, 200, 17, 146, 223, 115, 253, 126, 206, 152, 90, 0, 0, 0, 0}, 95, 151},
-	{{213, 58, 235, 255, 6, 163, 61, 10, 224, 0, 0, 0, 0, 0, 0, 0}, 68, 100},
-	{{25, 86, 139, 116, 190, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 49, 118},
-	{{113, 40, 65, 141, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 34, 164},
-	{{149, 205, 200, 186, 19, 126, 215, 199, 94, 37, 100, 32, 128, 0, 0, 0}, 98, 71},
-	{{39, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 251},
-	{{81, 87, 80, 173, 163, 166, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 51},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 185},
-	{{140, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 144},
-	{{6, 42, 1, 178, 250, 53, 186, 178, 114, 121, 192, 0, 0, 0, 0, 0}, 84, 51},
-	{{2, 17, 234, 51, 169, 5, 219, 149, 245, 237, 4, 0, 0, 0, 0, 0}, 87, 32},
-	{{112, 187, 173, 17, 229, 171, 225, 170, 8, 0, 0, 0, 0, 0, 0, 0}, 70, 137},
-	{{203, 71, 140, 237, 113, 96, 123, 16, 0, 0, 0, 0, 0, 0, 0, 0}, 60, 2},
-	{{99, 138, 207, 2, 244, 25, 211, 98, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 163},
-	{{114, 42, 98, 246, 252, 48, 233, 118, 63, 226, 157, 226, 192, 0, 0, 0}, 100, 162},
-	{{161, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, 192},
-	{{233, 70, 240, 45, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 36, 185},
-	{{28, 123, 31, 176, 235, 229, 169, 192, 0, 0, 0, 0, 0, 0, 0, 0}, 59, 51},
-	{{146, 197, 243, 235, 243, 56, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 93},
-	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 159},
-	{{141, 92, 13, 27, 87, 241, 171, 143, 220, 0, 0, 0, 0, 0, 0, 0}, 72, 189},
-	{{164, 151, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 21, 248},
-	{{35, 188, 248, 79, 39, 151, 232, 215, 248, 245, 185, 144, 78, 102, 173, 128}, 123, 38},
-	{{193, 232, 166, 60, 62, 80, 230, 225, 165, 240, 0, 0, 0, 0, 0, 0}, 76, 167},
-	{{109, 229, 118, 155, 43, 154, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 28},
-	{{160, 62, 63, 212, 218, 138, 154, 108, 163, 127, 197, 237, 183, 44, 140, 192}, 125, 37},
-	{{196, 37, 51, 146, 26, 85, 53, 31, 216, 141, 52, 218, 153, 32, 0, 0}, 107, 234},
-	{{228, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 9, 70},
-	{{154, 248, 20, 242, 154, 244, 63, 17, 121, 52, 70, 84, 118, 208, 0, 0}, 108, 50},
-	{{41, 100, 27, 84, 106, 112, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51, 171},
-	{{81, 99, 197, 139, 30, 150, 230, 216, 81, 190, 84, 165, 29, 64, 128, 0}, 113, 236},
-	{{112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 3},
-	{{164, 119, 253, 126, 160, 249, 183, 191, 119, 111, 224, 0, 0, 0, 0, 0}, 86, 64},
-	{{138, 58, 198, 254, 0, 197, 60, 91, 132, 199, 181, 251, 78, 160, 0, 0}, 108, 213},
-	{{209, 89, 168, 236, 146, 169, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 54, 15},
-	{{131, 210, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 20, 145},
-	{{165, 190, 157, 7, 131, 5, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 57, 27},
-	{{179, 226, 57, 204, 187, 70, 52, 81, 119, 162, 229, 42, 47, 185, 9, 162}, 127, 75},
-	{{98, 235, 155, 51, 107, 167, 127, 137, 254, 246, 162, 171, 180, 13, 233, 0}, 123, 76},
-	{{107, 79, 76, 90, 94, 151, 155, 31, 33, 115, 19, 204, 98, 115, 0, 0}, 113, 247},
-	{{143, 46, 30, 175, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 43, 121},
-	{{155, 85, 217, 180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 30, 214},
-	{{58, 62, 156, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 26, 221},
-	{{92, 155, 53, 3, 39, 108, 155, 200, 0, 0, 0, 0, 0, 0, 0, 0}, 63, 102},
-	{{64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 191},
-	{{63, 134, 251, 59, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 39, 197},
-	{{234, 149, 220, 106, 0, 144, 214, 128, 35, 102, 0, 0, 0, 0, 0, 0}, 79, 106},
+	{RTE_IPV6(0x4246, 0x9a8f, 0xc5e9, 0, 0, 0, 0, 0), 50, 146},
+	{RTE_IPV6(0x6b4f, 0x12eb, 0x8e54, 0x5000, 0, 0, 0, 0), 54, 141},
+	{RTE_IPV6(0xf784, 0x7101, 0xd7f7, 0xb7ef, 0x8000, 0, 0, 0), 67, 23},
+	{RTE_IPV6(0x3013, 0x290c, 0x4c65, 0x72a0, 0x2d67, 0x8692, 0x8000, 0), 97, 252},
+	{RTE_IPV6(0x0546, 0xd0aa, 0x1300, 0x7400, 0, 0, 0, 0), 54, 6},
+	{RTE_IPV6(0x0100, 0, 0, 0, 0, 0, 0, 0), 9, 137},
+	{RTE_IPV6(0x0cbc, 0x1a12, 0, 0, 0, 0, 0, 0), 31, 9},
+	{RTE_IPV6(0x01eb, 0x65ca, 0x1a5c, 0x1716, 0xb3df, 0x8000, 0, 0), 82, 9},
+	{RTE_IPV6(0xd713, 0xe066, 0x2d85, 0x66f9, 0x3814, 0xd6db, 0x5d7d, 0x3400), 120, 163},
+	{RTE_IPV6(0xb2b7, 0x6d40, 0x8854, 0x0b35, 0xd966, 0, 0, 0), 79, 197},
+	{RTE_IPV6(0xd427, 0x9e47, 0xfd62, 0xf800, 0, 0, 0, 0), 54, 249},
+	{RTE_IPV6(0x5c3a, 0x9f82, 0x6938, 0, 0, 0, 0, 0), 47, 88},
+	{RTE_IPV6(0x768c, 0x41c6, 0xd45d, 0x9000, 0, 0, 0, 0), 52, 104},
+	{RTE_IPV6(0x5640, 0, 0, 0, 0, 0, 0, 0), 10, 36},
+	{RTE_IPV6(0x4f87, 0xf2c1, 0xc50b, 0xc800, 0, 0, 0, 0), 54, 239},
+	{RTE_IPV6(0xa3e4, 0xef50, 0x2942, 0xb0b0, 0, 0, 0, 0), 67, 201},
+	{RTE_IPV6(0x1f09, 0xe72a, 0, 0, 0, 0, 0, 0), 33, 94},
+	{RTE_IPV6(0x6c90, 0xcd27, 0xd71a, 0x6000, 0, 0, 0, 0), 51, 241},
+	{RTE_IPV6(0xf7d9, 0xac00, 0, 0, 0, 0, 0, 0), 24, 239},
+	{RTE_IPV6(0x18ba, 0x49b6, 0xf0fb, 0x7da5, 0, 0, 0, 0), 66, 151},
+	{RTE_IPV6(0xf570, 0, 0, 0, 0, 0, 0, 0), 12, 137},
+	{RTE_IPV6(0x2c5e, 0x8ae0, 0xa800, 0, 0, 0, 0, 0), 41, 231},
+	{RTE_IPV6(0xb8dd, 0x6d87, 0xe120, 0, 0, 0, 0, 0), 44, 11},
+	{RTE_IPV6(0x33b3, 0x88b8, 0x1e76, 0x1810, 0x1aa1, 0xce65, 0, 0), 96, 20},
+	{RTE_IPV6(0x302e, 0, 0, 0, 0, 0, 0, 0), 15, 68},
+	{RTE_IPV6(0x8feb, 0xeddc, 0x5977, 0xbb8f, 0xd15e, 0x2e3a, 0x7800, 0), 101, 64},
+	{RTE_IPV6(0x79be, 0x5ab1, 0x8000, 0, 0, 0, 0, 0), 33, 152},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 6, 217},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 2, 101},
+	{RTE_IPV6(0x6fd6, 0, 0, 0, 0, 0, 0, 0), 15, 58},
+	{RTE_IPV6(0xa217, 0x3440, 0, 0, 0, 0, 0, 0), 27, 254},
+	{RTE_IPV6(0x4c67, 0x2c4f, 0, 0, 0, 0, 0, 0), 32, 148},
+	{RTE_IPV6(0x5055, 0xdbd6, 0x0c04, 0x4181, 0xa294, 0xd04e, 0x2745, 0x5eb8), 126, 126},
+	{RTE_IPV6(0x5036, 0xfb1c, 0x9817, 0xf4c0, 0x9753, 0x0690, 0xdfd5, 0xe080), 123, 76},
+	{RTE_IPV6(0x27e8, 0xed67, 0xbfbc, 0x2400, 0, 0, 0, 0), 54, 240},
+	{RTE_IPV6(0x14e7, 0x59d2, 0xa7ad, 0x5000, 0, 0, 0, 0), 54, 33},
+	{RTE_IPV6(0x7d43, 0xc680, 0, 0, 0, 0, 0, 0), 25, 47},
+	{RTE_IPV6(0x1aef, 0x9905, 0xd579, 0x1f72, 0xa12e, 0x540f, 0x94a0, 0), 109, 41},
+	{RTE_IPV6(0x66d4, 0x9f76, 0xdf73, 0x86ac, 0, 0, 0, 0), 62, 72},
+	{RTE_IPV6(0x55b5, 0xf17f, 0x032c, 0, 0, 0, 0, 0), 46, 43},
+	{RTE_IPV6(0x3dc7, 0x83e2, 0x03e6, 0x5e77, 0xf000, 0, 0, 0), 68, 26},
+	{RTE_IPV6(0x008f, 0xa0b8, 0xa2c0, 0, 0, 0, 0, 0), 42, 139},
+	{RTE_IPV6(0xaa18, 0, 0, 0, 0, 0, 0, 0), 13, 219},
+	{RTE_IPV6(0x3d7a, 0x18fb, 0x7c7a, 0xcac0, 0, 0, 0, 0), 58, 105},
+	{RTE_IPV6(0x21db, 0xe203, 0xb4be, 0, 0, 0, 0, 0), 47, 210},
+	{RTE_IPV6(0x33fb, 0, 0, 0, 0, 0, 0, 0), 17, 151},
+	{RTE_IPV6(0x6ab9, 0x0b7a, 0xc5c0, 0, 0, 0, 0, 0), 42, 28},
+	{RTE_IPV6(0xc000, 0, 0, 0, 0, 0, 0, 0), 9, 64},
+	{RTE_IPV6(0xefc3, 0x4def, 0x839c, 0x02f6, 0xbfb2, 0xcca0, 0x15d5, 0x1e80), 121, 9},
+	{RTE_IPV6(0x8dcf, 0xb563, 0x37f5, 0x97e4, 0x4132, 0x5510, 0, 0), 92, 250},
+	{RTE_IPV6(0x6e9f, 0xe6fb, 0xe0d2, 0x3a31, 0, 0, 0, 0), 66, 200},
+	{RTE_IPV6(0x861a, 0x6820, 0x8129, 0xc932, 0xa445, 0xb29c, 0x9c85, 0x08da), 127, 132},
+	{RTE_IPV6(0xfdcf, 0x7469, 0xd2a6, 0xba63, 0xb600, 0, 0, 0), 71, 182},
+	{RTE_IPV6(0xd349, 0x2650, 0xb7a8, 0x348a, 0x19d6, 0x7008, 0xfc00, 0), 102, 7},
+	{RTE_IPV6(0xc8f4, 0x6cee, 0xa48d, 0xd727, 0xe9f9, 0x7850, 0x7000, 0), 100, 146},
+	{RTE_IPV6(0x6b2c, 0xfaca, 0x4025, 0x6b69, 0x8c00, 0, 0, 0), 70, 98},
+	{RTE_IPV6(0x5d56, 0x381b, 0x9fc3, 0x7e27, 0xf0c9, 0x3000, 0, 0), 86, 179},
+	{RTE_IPV6(0x20ca, 0xd6f2, 0x278d, 0x3d92, 0x8a60, 0, 0, 0), 77, 245},
+	{RTE_IPV6(0xa74d, 0xf91c, 0xd2c4, 0xe3f1, 0, 0, 0, 0), 64, 2},
+	{RTE_IPV6(0xf13b, 0x8000, 0, 0, 0, 0, 0, 0), 17, 5},
+	{RTE_IPV6(0x8f44, 0x92d2, 0xad9b, 0xfbad, 0, 0, 0, 0), 66, 169},
+	{RTE_IPV6(0xa7b4, 0xe290, 0, 0, 0, 0, 0, 0), 33, 52},
+	{RTE_IPV6(0xf100, 0, 0, 0, 0, 0, 0, 0), 9, 177},
+	{RTE_IPV6(0xee09, 0xa860, 0, 0, 0, 0, 0, 0), 27, 74},
+	{RTE_IPV6(0xcb94, 0x1060, 0x7d12, 0x5601, 0x5bf4, 0xfb14, 0x1f0e, 0x4b80), 122, 212},
+	{RTE_IPV6(0x6fe3, 0x895e, 0x4115, 0x4d89, 0x7782, 0x9f13, 0x9f2d, 0x12c0), 122, 238},
+	{RTE_IPV6(0x3b90, 0, 0, 0, 0, 0, 0, 0), 19, 18},
+	{RTE_IPV6(0x6ec0, 0xff78, 0x54d7, 0x0382, 0x26e0, 0, 0, 0), 75, 155},
+	{RTE_IPV6(0x984f, 0xdb00, 0, 0, 0, 0, 0, 0), 24, 97},
+	{RTE_IPV6(0x76ba, 0x9df8, 0, 0, 0, 0, 0, 0), 32, 8},
+	{RTE_IPV6(0x4680, 0, 0, 0, 0, 0, 0, 0), 9, 123},
+	{RTE_IPV6(0xfd77, 0x72e3, 0x12f3, 0x513d, 0xee6b, 0xbe90, 0, 0), 92, 11},
+	{RTE_IPV6(0xa670, 0, 0, 0, 0, 0, 0, 0), 13, 211},
+	{RTE_IPV6(0x2b5f, 0xe000, 0, 0, 0, 0, 0, 0), 20, 116},
+	{RTE_IPV6(0x5e80, 0, 0, 0, 0, 0, 0, 0), 11, 57},
+	{RTE_IPV6(0xb6fb, 0xc384, 0x4207, 0xd092, 0xdfe7, 0xd3b5, 0x19b0, 0), 108, 178},
+	{RTE_IPV6(0x98a6, 0x6fe9, 0xc211, 0xe629, 0xddfd, 0x457b, 0x6c00, 0), 102, 93},
+	{RTE_IPV6(0x6a8d, 0xebbe, 0x52f1, 0x98ba, 0xc351, 0x5690, 0, 0), 92, 3},
+	{RTE_IPV6(0x2051, 0xd299, 0x971d, 0x0b3e, 0x7fb1, 0xc2fe, 0x6753, 0x3a80), 121, 162},
+	{RTE_IPV6(0x4f70, 0xe01a, 0xae27, 0x62b5, 0x7339, 0xd1bd, 0x8830, 0), 109, 125},
+	{RTE_IPV6(0x6ac5, 0x5397, 0x4000, 0, 0, 0, 0, 0), 34, 33},
+	{RTE_IPV6(0xbe00, 0, 0, 0, 0, 0, 0, 0), 9, 254},
+	{RTE_IPV6(0x9c49, 0xf994, 0x37c0, 0x142a, 0x8e80, 0, 0, 0), 74, 66},
+	{RTE_IPV6(0x406b, 0x2478, 0, 0, 0, 0, 0, 0), 30, 4},
+	{RTE_IPV6(0x7394, 0x47fa, 0x9eae, 0xa8f9, 0x6a6e, 0xc400, 0, 0), 86, 122},
+	{RTE_IPV6(0x128b, 0x982c, 0x2658, 0, 0, 0, 0, 0), 46, 59},
+	{RTE_IPV6(0x37e5, 0x756a, 0x925f, 0x4adc, 0x7a00, 0x54ca, 0xb78a, 0x7800), 117, 99},
+	{RTE_IPV6(0x99d3, 0x0360, 0, 0, 0, 0, 0, 0), 27, 41},
+	{RTE_IPV6(0x0100, 0, 0, 0, 0, 0, 0, 0), 8, 112},
+	{RTE_IPV6(0x31c0, 0x668e, 0xd803, 0x7240, 0xa580, 0xa800, 0, 0), 85, 255},
+	{RTE_IPV6(0xc98f, 0xf0f0, 0xd1e0, 0, 0, 0, 0, 0), 44, 106},
+	{RTE_IPV6(0x9e13, 0xa4c4, 0x57a2, 0x2178, 0, 0, 0, 0), 62, 170},
+	{RTE_IPV6(0x0572, 0, 0, 0, 0, 0, 0, 0), 16, 86},
+	{RTE_IPV6(0x22aa, 0xf63e, 0xc655, 0xc1e3, 0xfc44, 0, 0, 0), 79, 155},
+	{RTE_IPV6(0x1534, 0x0956, 0xe000, 0, 0, 0, 0, 0), 35, 65},
+	{RTE_IPV6(0xcb51, 0x31ab, 0xe000, 0, 0, 0, 0, 0), 36, 39},
+	{RTE_IPV6(0xd3da, 0x57f4, 0x5db5, 0x7629, 0x9c8f, 0xfe00, 0, 0), 90, 162},
+	{RTE_IPV6(0x4d40, 0, 0, 0, 0, 0, 0, 0), 10, 69},
+	{RTE_IPV6(0x9edb, 0xdb27, 0x04db, 0x643f, 0, 0, 0, 0), 64, 163},
+	{RTE_IPV6(0x3d32, 0xe801, 0xb9fc, 0xf336, 0xbdf0, 0xaac0, 0, 0), 90, 116},
+	{RTE_IPV6(0xf18f, 0x2113, 0xf737, 0x2000, 0, 0, 0, 0), 53, 19},
+	{RTE_IPV6(0x3d1c, 0x3dfc, 0x2000, 0, 0, 0, 0, 0), 36, 48},
+	{RTE_IPV6(0x6670, 0xc26c, 0x5afd, 0x8000, 0, 0, 0, 0), 49, 230},
+	{RTE_IPV6(0x4a58, 0x3a42, 0xac29, 0x90cc, 0xc3f0, 0, 0, 0), 78, 155},
+	{RTE_IPV6(0x2c94, 0xbb3a, 0xbe3b, 0xbebb, 0x7c8a, 0xde83, 0, 0), 96, 158},
+	{RTE_IPV6(0x4307, 0xd88b, 0x5de0, 0x1487, 0xba56, 0xd16f, 0x3c50, 0), 113, 252},
+	{RTE_IPV6(0xd11a, 0x0cae, 0x0565, 0xa4b5, 0xed3f, 0xc039, 0x3678, 0), 110, 176},
+	{RTE_IPV6(0x0442, 0xe834, 0xef38, 0x303a, 0xc000, 0, 0, 0), 66, 211},
+	{RTE_IPV6(0x9ea5, 0x0290, 0, 0, 0, 0, 0, 0), 28, 15},
+	{RTE_IPV6(0x55cc, 0xf5c6, 0x442c, 0x2747, 0x2000, 0, 0, 0), 68, 95},
+	{RTE_IPV6(0xb586, 0x1957, 0x8000, 0, 0, 0, 0, 0), 34, 169},
+	{RTE_IPV6(0x1ae6, 0x3d24, 0x4fc0, 0, 0, 0, 0, 0), 44, 249},
+	{RTE_IPV6(0x05aa, 0xc68b, 0x41ba, 0xbc2d, 0x2afd, 0xa559, 0xce00, 0), 105, 61},
+	{RTE_IPV6(0xd3f5, 0x3e00, 0, 0, 0, 0, 0, 0), 23, 63},
+	{RTE_IPV6(0x7544, 0, 0, 0, 0, 0, 0, 0), 14, 43},
+	{RTE_IPV6(0x6711, 0x7b66, 0x46ce, 0x5a5c, 0x7cc6, 0, 0, 0), 81, 228},
+	{RTE_IPV6(0xc0ed, 0x58f4, 0x351e, 0x3da0, 0x8f40, 0, 0, 0), 78, 165},
+	{RTE_IPV6(0xc752, 0xd9b7, 0x02b3, 0xc306, 0, 0, 0, 0), 64, 3},
+	{RTE_IPV6(0x9de6, 0x4fa2, 0x397d, 0x9800, 0, 0, 0, 0), 57, 211},
+	{RTE_IPV6(0x1b43, 0x40eb, 0x8000, 0, 0, 0, 0, 0), 33, 210},
+	{RTE_IPV6(0x489e, 0xa36a, 0xc189, 0xbe07, 0xfaa5, 0xf949, 0x4000, 0), 99, 61},
+	{RTE_IPV6(0x22c0, 0, 0, 0, 0, 0, 0, 0), 10, 120},
+	{RTE_IPV6(0xd78d, 0x5fc0, 0xbd3e, 0, 0, 0, 0, 0), 47, 94},
+	{RTE_IPV6(0x1fb5, 0x388d, 0x7880, 0, 0, 0, 0, 0), 41, 153},
+	{RTE_IPV6(0x9949, 0x8000, 0, 0, 0, 0, 0, 0), 18, 221},
+	{RTE_IPV6(0xa26b, 0x29bd, 0xa59b, 0x168b, 0xa548, 0x6000, 0, 0), 87, 163},
+	{RTE_IPV6(0xda11, 0xcca5, 0xd9fb, 0x6b2d, 0x1d0f, 0xc0a7, 0x4b00, 0), 106, 188},
+	{RTE_IPV6(0xc87c, 0xeed5, 0x23e4, 0x5e8d, 0x56bb, 0x653c, 0x7334, 0x8310), 124, 15},
+	{RTE_IPV6(0x4aed, 0xa038, 0x8dd9, 0xbf10, 0, 0, 0, 0), 63, 28},
+	{RTE_IPV6(0xa32f, 0xf267, 0xadd9, 0x589a, 0x26c8, 0x2000, 0, 0), 84, 240},
+	{RTE_IPV6(0x14e3, 0x801c, 0x9093, 0x160d, 0x5e81, 0x6b58, 0, 0), 93, 59},
+	{RTE_IPV6(0x5f90, 0xe56b, 0xda7d, 0xcce9, 0xa12a, 0xb440, 0, 0), 90, 195},
+	{RTE_IPV6(0x9bdc, 0x53d0, 0x6c10, 0x869c, 0x8000, 0, 0, 0), 66, 10},
+	{RTE_IPV6(0xb38a, 0x3750, 0xbe99, 0x0ced, 0x1678, 0x4500, 0, 0), 88, 206},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 2, 137},
+	{RTE_IPV6(0x0377, 0x9400, 0, 0, 0, 0, 0, 0), 22, 225},
+	{RTE_IPV6(0x0dc0, 0, 0, 0, 0, 0, 0, 0), 10, 223},
+	{RTE_IPV6(0x751c, 0, 0, 0, 0, 0, 0, 0), 15, 29},
+	{RTE_IPV6(0xa413, 0xc32f, 0x88be, 0x9cff, 0x1e4a, 0x8f86, 0xa200, 0), 103, 166},
+	{RTE_IPV6(0x28eb, 0x5e87, 0x87e6, 0x4721, 0x40e9, 0, 0, 0), 80, 178},
+	{RTE_IPV6(0xde97, 0xa661, 0x81fa, 0x8c00, 0, 0, 0, 0), 55, 38},
+	{RTE_IPV6(0xae80, 0, 0, 0, 0, 0, 0, 0), 9, 141},
+	{RTE_IPV6(0x06bd, 0x6496, 0xfa0d, 0x2e62, 0xe48b, 0x3234, 0x34c4, 0x8000), 116, 230},
+	{RTE_IPV6(0x4bfc, 0x59cd, 0x2534, 0x6a4f, 0xbc78, 0x3677, 0xa000, 0), 99, 124},
+	{RTE_IPV6(0x2612, 0x9206, 0x3f40, 0xe70a, 0x98c7, 0x058f, 0x9304, 0xfc00), 118, 54},
+	{RTE_IPV6(0x6f77, 0xa933, 0, 0, 0, 0, 0, 0), 32, 162},
+	{RTE_IPV6(0x6980, 0, 0, 0, 0, 0, 0, 0), 13, 32},
+	{RTE_IPV6(0x8f39, 0x3965, 0x62b6, 0x4ae3, 0xcd8f, 0xfded, 0x0800, 0), 102, 237},
+	{RTE_IPV6(0x1e00, 0, 0, 0, 0, 0, 0, 0), 7, 215},
+	{RTE_IPV6(0x0ee8, 0x3000, 0, 0, 0, 0, 0, 0), 22, 138},
+	{RTE_IPV6(0x0e35, 0x43d8, 0xe59b, 0x958b, 0x1ffd, 0xb87e, 0x856c, 0x2800), 118, 73},
+	{RTE_IPV6(0x163a, 0x288f, 0xbc84, 0xef0e, 0xb5fc, 0x51c0, 0, 0), 90, 43},
+	{RTE_IPV6(0x0bde, 0xb9f3, 0xf896, 0x4fe6, 0xd6d5, 0x0317, 0xc1c4, 0), 112, 88},
+	{RTE_IPV6(0x0ee2, 0xc675, 0x545d, 0x1660, 0x4df1, 0xad44, 0x44cc, 0x4800), 119, 91},
+	{RTE_IPV6(0x0f67, 0xf7db, 0x968e, 0x5c32, 0x9000, 0, 0, 0), 69, 140},
+	{RTE_IPV6(0x00d5, 0x4df4, 0x4000, 0, 0, 0, 0, 0), 37, 65},
+	{RTE_IPV6(0xb2ae, 0xaeef, 0x48b5, 0x24d9, 0x28a9, 0x0c68, 0x959d, 0x7d80), 122, 201},
+	{RTE_IPV6(0x7635, 0x3711, 0x61e3, 0xf3b0, 0x0200, 0, 0, 0), 72, 69},
+	{RTE_IPV6(0x15fd, 0x042f, 0, 0, 0, 0, 0, 0), 35, 170},
+	{RTE_IPV6(0x05f9, 0xba85, 0x4400, 0, 0, 0, 0, 0), 40, 192},
+	{RTE_IPV6(0x2f4f, 0x2342, 0x0bb2, 0xa11c, 0x57b4, 0x2d80, 0, 0), 89, 21},
+	{RTE_IPV6(0xf2e3, 0x1449, 0x96c4, 0, 0, 0, 0, 0), 46, 35},
+	{RTE_IPV6(0x79a9, 0x6676, 0x9dc0, 0x9aba, 0x7e00, 0, 0, 0), 71, 235},
+	{RTE_IPV6(0x098a, 0xc000, 0, 0, 0, 0, 0, 0), 21, 240},
+	{RTE_IPV6(0x2dad, 0x0e48, 0, 0, 0, 0, 0, 0), 30, 136},
+	{RTE_IPV6(0x7f2f, 0x33c9, 0xec2d, 0x8e50, 0, 0, 0, 0), 60, 186},
+	{RTE_IPV6(0xf7e9, 0x2226, 0xb5cf, 0x7f14, 0xe076, 0x3b94, 0, 0), 95, 174},
+	{RTE_IPV6(0x7ebb, 0xc668, 0xf5df, 0xdb12, 0x1f7c, 0, 0, 0), 79, 153},
+	{RTE_IPV6(0x03a3, 0x6be4, 0xc000, 0, 0, 0, 0, 0), 35, 118},
+	{RTE_IPV6(0xa76d, 0x025f, 0x0b3e, 0x2d80, 0, 0, 0, 0), 60, 113},
+	{RTE_IPV6(0x4c00, 0, 0, 0, 0, 0, 0, 0), 6, 58},
+	{RTE_IPV6(0x3abe, 0xcc97, 0xde93, 0x2f4e, 0x26cb, 0x0911, 0x4000, 0), 101, 206},
+	{RTE_IPV6(0xfedc, 0xfedc, 0xcc4f, 0x237f, 0xf23f, 0x6ae8, 0x7fb4, 0), 111, 42},
+	{RTE_IPV6(0x4d9c, 0x08d1, 0xb525, 0x4600, 0, 0, 0, 0), 55, 230},
+	{RTE_IPV6(0x4159, 0x894c, 0xd0c7, 0xa65a, 0x8000, 0, 0, 0), 67, 6},
+	{RTE_IPV6(0x2fe8, 0, 0, 0, 0, 0, 0, 0), 13, 254},
+	{RTE_IPV6(0xac9a, 0x0c6c, 0x4d25, 0x6a08, 0xea07, 0xf8d4, 0x70a0, 0), 108, 214},
+	{RTE_IPV6(0xfe75, 0xeff4, 0x9a59, 0xa6f1, 0x0c6c, 0x7f99, 0xcea0, 0), 107, 43},
+	{RTE_IPV6(0x71a0, 0xce34, 0x8f0c, 0x0994, 0xe000, 0, 0, 0), 67, 178},
+	{RTE_IPV6(0xb282, 0, 0, 0, 0, 0, 0, 0), 16, 179},
+	{RTE_IPV6(0xe5b1, 0x1c6a, 0x3b4b, 0xb6f1, 0x244f, 0xe000, 0, 0), 87, 236},
+	{RTE_IPV6(0x9c48, 0x5dc1, 0x32eb, 0x4be4, 0x5873, 0x5977, 0x8000, 0), 98, 184},
+	{RTE_IPV6(0x1ce8, 0x1cf9, 0x5369, 0xd307, 0x8893, 0xe740, 0, 0), 91, 95},
+	{RTE_IPV6(0xd921, 0x176b, 0x4a2a, 0x87c5, 0x9022, 0x28f3, 0x0d7e, 0x2488), 127, 152},
+	{RTE_IPV6(0x4000, 0, 0, 0, 0, 0, 0, 0), 2, 113},
+	{RTE_IPV6(0x55ac, 0x797e, 0xd539, 0xe136, 0xc549, 0x55fb, 0x0940, 0), 108, 137},
+	{RTE_IPV6(0x682e, 0x1947, 0x56dc, 0, 0, 0, 0, 0), 46, 224},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 6, 61},
+	{RTE_IPV6(0xf171, 0xfe6a, 0x3580, 0, 0, 0, 0, 0), 41, 205},
+	{RTE_IPV6(0x1d24, 0x0cf4, 0xc57f, 0xf008, 0xa786, 0x9af8, 0xc77b, 0x8ff0), 124, 170},
+	{RTE_IPV6(0x3a1d, 0x815e, 0x2b8b, 0xe000, 0, 0, 0, 0), 53, 117},
+	{RTE_IPV6(0xd57c, 0x93c4, 0x0752, 0x4346, 0xe400, 0, 0, 0), 70, 225},
+	{RTE_IPV6(0xa4a8, 0xa18c, 0x5755, 0xfa29, 0x2200, 0, 0, 0), 72, 34},
+	{RTE_IPV6(0xba8e, 0x8000, 0, 0, 0, 0, 0, 0), 17, 5},
+	{RTE_IPV6(0xedf9, 0x0946, 0xf761, 0x4000, 0, 0, 0, 0), 50, 92},
+	{RTE_IPV6(0x9b5c, 0x91da, 0x7de2, 0xe200, 0, 0, 0, 0), 55, 230},
+	{RTE_IPV6(0x23a9, 0x3e9c, 0x5604, 0x7ddb, 0x7771, 0xbf4b, 0xc671, 0), 112, 61},
+	{RTE_IPV6(0xcf3f, 0x60ba, 0x1a44, 0x73a1, 0xa33b, 0xbea6, 0x124e, 0xe800), 117, 221},
+	{RTE_IPV6(0x5628, 0xc8c7, 0xf756, 0x9fb3, 0xbfb8, 0x75ad, 0xd39e, 0x0080), 121, 105},
+	{RTE_IPV6(0x6840, 0, 0, 0, 0, 0, 0, 0), 11, 181},
+	{RTE_IPV6(0xcd23, 0x7bb2, 0x2440, 0x3e99, 0xc3fa, 0, 0, 0), 79, 110},
+	{RTE_IPV6(0x7528, 0x399d, 0x8aa0, 0xdf3b, 0x9b91, 0x4000, 0, 0), 86, 103},
+	{RTE_IPV6(0x4aa6, 0x8c92, 0x4a48, 0xe563, 0xa77c, 0x6b75, 0xd90e, 0xf640), 123, 218},
+	{RTE_IPV6(0x0cde, 0xf4b7, 0x5392, 0x2a00, 0, 0, 0, 0), 56, 146},
+	{RTE_IPV6(0x0b62, 0x926e, 0x5f60, 0x508e, 0xe000, 0, 0, 0), 67, 90},
+	{RTE_IPV6(0xeb05, 0xbbc7, 0x1eaa, 0x52bb, 0xe49f, 0x1619, 0xcc70, 0), 108, 197},
+	{RTE_IPV6(0x2360, 0x9291, 0x9b74, 0xfcb5, 0x1dcd, 0xe6f6, 0x1e00, 0), 103, 158},
+	{RTE_IPV6(0xae26, 0x38f4, 0xe366, 0xfced, 0x8056, 0, 0, 0), 81, 118},
+	{RTE_IPV6(0x4186, 0x253a, 0x5a7d, 0x3c54, 0, 0, 0, 0), 62, 95},
+	{RTE_IPV6(0xfd75, 0x8762, 0x8000, 0, 0, 0, 0, 0), 33, 152},
+	{RTE_IPV6(0x6f73, 0xbcb8, 0x8200, 0, 0, 0, 0, 0), 45, 239},
+	{RTE_IPV6(0xca18, 0x5909, 0x952d, 0x4000, 0, 0, 0, 0), 50, 48},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 5, 228},
+	{RTE_IPV6(0xf462, 0x348c, 0, 0, 0, 0, 0, 0), 30, 247},
+	{RTE_IPV6(0x97a7, 0x2bb2, 0x74c2, 0xad7e, 0xec62, 0x2800, 0, 0), 85, 12},
+	{RTE_IPV6(0x3c40, 0, 0, 0, 0, 0, 0, 0), 10, 129},
+	{RTE_IPV6(0xd000, 0, 0, 0, 0, 0, 0, 0), 4, 50},
+	{RTE_IPV6(0x7e0b, 0xd8f2, 0x072d, 0x79d0, 0x6e87, 0xd24b, 0x3bb6, 0xe42a), 128, 250},
+	{RTE_IPV6(0xd91a, 0xb892, 0x0312, 0xf00f, 0x8708, 0, 0, 0), 77, 249},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 1, 230},
+	{RTE_IPV6(0x911c, 0x1db8, 0x0255, 0xea87, 0x626f, 0x8820, 0, 0), 92, 228},
+	{RTE_IPV6(0x6c68, 0xfffe, 0x225f, 0x489d, 0, 0, 0, 0), 64, 181},
+	{RTE_IPV6(0x993d, 0, 0, 0, 0, 0, 0, 0), 16, 206},
+	{RTE_IPV6(0x16fa, 0x82c9, 0x84f8, 0xbd6c, 0, 0, 0, 0), 63, 122},
+	{RTE_IPV6(0x9ea5, 0xea12, 0x2c3d, 0x523d, 0xeb00, 0, 0, 0), 72, 81},
+	{RTE_IPV6(0xec39, 0x7c6e, 0x7cda, 0x5246, 0x8e4e, 0x1280, 0, 0), 95, 175},
+	{RTE_IPV6(0x5ed1, 0xc8c9, 0x95a2, 0xf886, 0xefe2, 0x01ed, 0x1086, 0x3800), 118, 170},
+	{RTE_IPV6(0xbb2a, 0x1f90, 0xec46, 0, 0, 0, 0, 0), 47, 174},
+	{RTE_IPV6(0x5ad6, 0xb980, 0, 0, 0, 0, 0, 0), 29, 104},
+	{RTE_IPV6(0xc2dc, 0xd3d4, 0xd320, 0xc462, 0x473e, 0x9967, 0x5023, 0x8000), 114, 113},
+	{RTE_IPV6(0x18ff, 0x9e40, 0xb494, 0x0a51, 0xf3f7, 0, 0, 0), 80, 89},
+	{RTE_IPV6(0xe79b, 0x64f2, 0x70a0, 0xa05f, 0x62fd, 0xdb15, 0xef5a, 0), 113, 151},
+	{RTE_IPV6(0xe160, 0, 0, 0, 0, 0, 0, 0), 11, 108},
+	{RTE_IPV6(0x8800, 0, 0, 0, 0, 0, 0, 0), 7, 224},
+	{RTE_IPV6(0xfa80, 0, 0, 0, 0, 0, 0, 0), 9, 95},
+	{RTE_IPV6(0x48a8, 0, 0, 0, 0, 0, 0, 0), 14, 173},
+	{RTE_IPV6(0xb933, 0x33a7, 0x122c, 0x243b, 0x2387, 0x1468, 0, 0), 93, 176},
+	{RTE_IPV6(0x3992, 0xfc3c, 0xc544, 0x27a2, 0x50c6, 0x8932, 0x615c, 0x7c00), 119, 84},
+	{RTE_IPV6(0xfe2e, 0xf269, 0x565e, 0x600e, 0x82b0, 0, 0, 0), 78, 104},
+	{RTE_IPV6(0xf7ca, 0xb04c, 0x4528, 0, 0, 0, 0, 0), 49, 236},
+	{RTE_IPV6(0x32e9, 0xcb4d, 0x2a15, 0x73a3, 0xa68a, 0xc034, 0xb225, 0x7000), 116, 153},
+	{RTE_IPV6(0x3ec0, 0, 0, 0, 0, 0, 0, 0), 11, 190},
+	{RTE_IPV6(0x3560, 0, 0, 0, 0, 0, 0, 0), 13, 202},
+	{RTE_IPV6(0xc600, 0, 0, 0, 0, 0, 0, 0), 8, 54},
+	{RTE_IPV6(0xbdea, 0x6af7, 0x4000, 0, 0, 0, 0, 0), 34, 156},
+	{RTE_IPV6(0x6e18, 0xe441, 0xd893, 0x0930, 0x3cb3, 0xac5b, 0x73b9, 0xe360), 126, 245},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 6, 218},
+	{RTE_IPV6(0x4ab1, 0x59da, 0xf812, 0xb027, 0x76ad, 0xc998, 0, 0), 93, 72},
+	{RTE_IPV6(0x1f0d, 0x995c, 0x1b7a, 0x96e8, 0x585f, 0xcaab, 0xd09e, 0), 112, 183},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 2, 183},
+	{RTE_IPV6(0x3f25, 0x2e9e, 0x8b80, 0, 0, 0, 0, 0), 42, 241},
+	{RTE_IPV6(0x35d1, 0x3b0d, 0xca46, 0, 0, 0, 0, 0), 47, 106},
+	{RTE_IPV6(0xb82c, 0x95dd, 0xb400, 0, 0, 0, 0, 0), 40, 180},
+	{RTE_IPV6(0xde86, 0x253e, 0xdfc1, 0x27f6, 0x0f97, 0xc892, 0, 0), 96, 142},
+	{RTE_IPV6(0xc7b0, 0xbd25, 0xe9b1, 0xfcd8, 0x5eaf, 0xfd77, 0x6000, 0), 100, 6},
+	{RTE_IPV6(0x2cc3, 0xc96a, 0xd178, 0x7a26, 0x2b1e, 0x8e16, 0xc4af, 0x6400), 118, 33},
+	{RTE_IPV6(0x21a6, 0x0aae, 0x4000, 0, 0, 0, 0, 0), 34, 224},
+	{RTE_IPV6(0x3601, 0xbdc3, 0x8531, 0x2450, 0x8ac8, 0, 0, 0), 78, 14},
+	{RTE_IPV6(0xf100, 0, 0, 0, 0, 0, 0, 0), 10, 149},
+	{RTE_IPV6(0xdd83, 0x04f7, 0x7059, 0xbb77, 0xdb50, 0x7a9c, 0xd8a0, 0), 108, 131},
+	{RTE_IPV6(0x6614, 0x2e81, 0xcaf7, 0x8101, 0xed47, 0x673a, 0xd92c, 0x0400), 121, 133},
+	{RTE_IPV6(0x6b9c, 0x972c, 0xd762, 0xab7e, 0x5520, 0x2a80, 0, 0), 89, 33},
+	{RTE_IPV6(0x3619, 0x4650, 0, 0, 0, 0, 0, 0), 28, 204},
+	{RTE_IPV6(0x95d3, 0xf20e, 0x70db, 0xb000, 0, 0, 0, 0), 52, 43},
+	{RTE_IPV6(0x5f1a, 0x8fc1, 0x084c, 0x4000, 0, 0, 0, 0), 51, 168},
+	{RTE_IPV6(0x3f66, 0xf4b0, 0, 0, 0, 0, 0, 0), 28, 180},
+	{RTE_IPV6(0x4055, 0x7ce2, 0x3bef, 0x4082, 0x447a, 0x5d4a, 0x2025, 0), 112, 208},
+	{RTE_IPV6(0x715a, 0xfd95, 0x03da, 0x22d7, 0x038f, 0xc040, 0, 0), 90, 25},
+	{RTE_IPV6(0x4be7, 0x2105, 0x0b5e, 0x7568, 0x963c, 0x48a1, 0x6026, 0), 111, 50},
+	{RTE_IPV6(0x340d, 0xf801, 0xfb0e, 0x321d, 0xd47b, 0x82b1, 0x6560, 0), 109, 110},
+	{RTE_IPV6(0xf8dd, 0x9684, 0xfc52, 0x6002, 0x50e8, 0x61ef, 0xfd40, 0), 109, 21},
+	{RTE_IPV6(0x884d, 0xa4a1, 0xc000, 0, 0, 0, 0, 0), 36, 147},
+	{RTE_IPV6(0x0121, 0x42fe, 0x9080, 0, 0, 0, 0, 0), 43, 56},
+	{RTE_IPV6(0xb519, 0xbae1, 0x6dbe, 0x4c9e, 0x767a, 0x1440, 0x7d37, 0x0800), 117, 144},
+	{RTE_IPV6(0xbfbb, 0xa08c, 0x1106, 0x5078, 0xecd4, 0x6890, 0x8000, 0), 100, 198},
+	{RTE_IPV6(0xc93d, 0x96fe, 0x464d, 0xd6d3, 0xaba3, 0xf540, 0, 0), 90, 235},
+	{RTE_IPV6(0x8fe2, 0xbe32, 0xfc00, 0, 0, 0, 0, 0), 38, 105},
+	{RTE_IPV6(0x41a8, 0xe224, 0xc950, 0, 0, 0, 0, 0), 45, 138},
+	{RTE_IPV6(0x8828, 0x415a, 0x2f10, 0x8000, 0, 0, 0, 0), 49, 122},
+	{RTE_IPV6(0x5ebd, 0xe0c8, 0xaa0b, 0x4fac, 0, 0, 0, 0), 65, 193},
+	{RTE_IPV6(0xec29, 0xa9ea, 0x0e80, 0, 0, 0, 0, 0), 43, 231},
+	{RTE_IPV6(0x0128, 0x8c5f, 0x51ad, 0xfaf8, 0, 0, 0, 0), 64, 250},
+	{RTE_IPV6(0x53b0, 0x9270, 0x599c, 0x39dc, 0x7d30, 0x2c00, 0, 0), 86, 24},
+	{RTE_IPV6(0x4c7d, 0xe4f9, 0xf3a0, 0x6a00, 0, 0, 0, 0), 55, 191},
+	{RTE_IPV6(0x0acb, 0xcc31, 0xd473, 0x7d04, 0xef7a, 0x5122, 0x01c6, 0xd800), 117, 111},
+	{RTE_IPV6(0x4ad6, 0x172c, 0xd328, 0xa13d, 0xedbe, 0x9b3b, 0xad2a, 0), 111, 205},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 1, 133},
+	{RTE_IPV6(0x7f00, 0x823d, 0xd105, 0xe823, 0x232a, 0x7234, 0xa9ea, 0xbf00), 122, 122},
+	{RTE_IPV6(0xc96b, 0xd20d, 0xbb3e, 0x911c, 0x1fbd, 0x3800, 0, 0), 87, 227},
+	{RTE_IPV6(0x93ab, 0x3f91, 0x2fa0, 0, 0, 0, 0, 0), 46, 53},
+	{RTE_IPV6(0x5de8, 0x0a61, 0x15f3, 0xd587, 0xc800, 0, 0, 0), 72, 224},
+	{RTE_IPV6(0x9079, 0x2940, 0, 0, 0, 0, 0, 0), 26, 199},
+	{RTE_IPV6(0x7469, 0x8000, 0, 0, 0, 0, 0, 0), 17, 79},
+	{RTE_IPV6(0x8e95, 0x1800, 0, 0, 0, 0, 0, 0), 21, 19},
+	{RTE_IPV6(0x6100, 0xe49e, 0x32e9, 0xfbf9, 0x0042, 0xc5e2, 0, 0), 96, 211},
+	{RTE_IPV6(0x72e4, 0xc79b, 0xaf68, 0x1ad5, 0x42f9, 0x78da, 0xa4fc, 0xd400), 120, 6},
+	{RTE_IPV6(0xe0a6, 0x4cc8, 0x793c, 0x6e41, 0x3c5f, 0x89be, 0x5cda, 0xda00), 121, 143},
+	{RTE_IPV6(0x8bdb, 0x5ce0, 0, 0, 0, 0, 0, 0), 31, 135},
+	{RTE_IPV6(0xcbed, 0x40bd, 0x1c0d, 0x4bc5, 0xdbf3, 0xac03, 0x8e20, 0), 109, 21},
+	{RTE_IPV6(0xedba, 0x58fe, 0x7c00, 0, 0, 0, 0, 0), 38, 220},
+	{RTE_IPV6(0xb6e6, 0x5da2, 0x8119, 0x38c4, 0x7000, 0, 0, 0), 68, 151},
+	{RTE_IPV6(0xf52d, 0x45e2, 0x5ad4, 0xfe10, 0, 0, 0, 0), 60, 111},
+	{RTE_IPV6(0x6be5, 0xf000, 0, 0, 0, 0, 0, 0), 20, 63},
+	{RTE_IPV6(0x77d0, 0xb1eb, 0xdefc, 0xdb00, 0, 0, 0, 0), 57, 112},
+	{RTE_IPV6(0xb297, 0xdca2, 0x7880, 0, 0, 0, 0, 0), 41, 48},
+	{RTE_IPV6(0x6d1a, 0x5faa, 0xa697, 0x8953, 0xe252, 0x0572, 0xfdd2, 0x120c), 126, 100},
+	{RTE_IPV6(0x7e1b, 0xfc13, 0xdb81, 0x7930, 0, 0, 0, 0), 60, 156},
+	{RTE_IPV6(0xd3c3, 0x9891, 0x9a5d, 0xe4d7, 0x8765, 0x1c52, 0, 0), 95, 120},
+	{RTE_IPV6(0xfc6b, 0, 0, 0, 0, 0, 0, 0), 16, 5},
+	{RTE_IPV6(0xc000, 0, 0, 0, 0, 0, 0, 0), 4, 103},
+	{RTE_IPV6(0x4000, 0, 0, 0, 0, 0, 0, 0), 4, 84},
+	{RTE_IPV6(0xe1b3, 0x2b2b, 0xde91, 0xcdee, 0xa49e, 0x93e5, 0x3800, 0), 101, 24},
+	{RTE_IPV6(0xd07f, 0x9718, 0x4071, 0x2f55, 0xd14f, 0x9000, 0, 0), 86, 81},
+	{RTE_IPV6(0xb290, 0xcb68, 0, 0, 0, 0, 0, 0), 29, 96},
+	{RTE_IPV6(0x38e3, 0x8b04, 0x5657, 0xb401, 0xd7a7, 0xed9c, 0x6f40, 0x2f00), 121, 6},
+	{RTE_IPV6(0x504c, 0xcc77, 0xaca9, 0xfe51, 0x68a6, 0xdb2c, 0xada1, 0xd400), 119, 40},
+	{RTE_IPV6(0x818d, 0x8b22, 0xf165, 0xdf90, 0, 0, 0, 0), 62, 143},
+	{RTE_IPV6(0x5566, 0x8962, 0x4167, 0x368e, 0x9000, 0, 0, 0), 68, 69},
+	{RTE_IPV6(0x381f, 0x9f0d, 0xc98b, 0xa11f, 0x5989, 0x0400, 0, 0), 92, 48},
+	{RTE_IPV6(0xe5dd, 0x36d8, 0xdf1b, 0xc401, 0, 0, 0, 0), 64, 115},
+	{RTE_IPV6(0x0590, 0xb02b, 0xb4bb, 0x1431, 0x3b49, 0x6c22, 0x5320, 0xc000), 115, 130},
+	{RTE_IPV6(0x18d9, 0xcdc1, 0x4a7b, 0xa06a, 0x674a, 0xc800, 0, 0), 86, 57},
+	{RTE_IPV6(0xf700, 0, 0, 0, 0, 0, 0, 0), 8, 97},
+	{RTE_IPV6(0x0c80, 0, 0, 0, 0, 0, 0, 0), 9, 146},
+	{RTE_IPV6(0xa01c, 0xc977, 0x945d, 0xfb76, 0x1cb3, 0x7b34, 0x47e8, 0x3000), 117, 194},
+	{RTE_IPV6(0x987e, 0x1136, 0x6538, 0x8201, 0xcd29, 0xcf5a, 0x977b, 0x8000), 114, 129},
+	{RTE_IPV6(0x4da5, 0x1def, 0x5ff2, 0x2201, 0x0bcc, 0x87ef, 0x8000, 0), 97, 159},
+	{RTE_IPV6(0xb76c, 0x9276, 0x4abe, 0x078d, 0x095c, 0x0202, 0x08da, 0x7800), 117, 242},
+	{RTE_IPV6(0x2598, 0x1def, 0xf235, 0x388f, 0xdb16, 0x0e9e, 0x3100, 0), 104, 162},
+	{RTE_IPV6(0xc635, 0xf166, 0xf0f4, 0x61cb, 0x3e80, 0xd5d6, 0xdc00, 0), 102, 140},
+	{RTE_IPV6(0x9059, 0x302a, 0xf9e7, 0xbdb2, 0xe8c7, 0x1e3a, 0x3f39, 0), 113, 77},
+	{RTE_IPV6(0x44d4, 0xb17b, 0x2ce0, 0x13ac, 0x5957, 0xc000, 0, 0), 82, 121},
+	{RTE_IPV6(0xfc1d, 0xb3e0, 0x0479, 0xcd43, 0x9800, 0, 0, 0), 69, 102},
+	{RTE_IPV6(0x1c6e, 0xa400, 0, 0, 0, 0, 0, 0), 23, 28},
+	{RTE_IPV6(0x1858, 0xe701, 0x0447, 0x47f1, 0xfc0e, 0xc500, 0, 0), 89, 154},
+	{RTE_IPV6(0x3f83, 0x2b4c, 0x3a8c, 0xa34a, 0x9e50, 0, 0, 0), 76, 39},
+	{RTE_IPV6(0x381c, 0x9395, 0x625d, 0xd8d8, 0xcb9c, 0, 0, 0), 78, 163},
+	{RTE_IPV6(0x86a9, 0x0667, 0xa1f4, 0x8675, 0x1000, 0, 0, 0), 68, 42},
+	{RTE_IPV6(0x8ff7, 0x7dbe, 0x6a32, 0xcc62, 0xfa97, 0xa160, 0, 0), 92, 207},
+	{RTE_IPV6(0xeb00, 0, 0, 0, 0, 0, 0, 0), 8, 25},
+	{RTE_IPV6(0x2e48, 0, 0, 0, 0, 0, 0, 0), 18, 150},
+	{RTE_IPV6(0xab23, 0x8075, 0x4a1d, 0xc743, 0x6db0, 0, 0, 0), 76, 103},
+	{RTE_IPV6(0xdce9, 0xec70, 0x8788, 0xd72b, 0x2a00, 0, 0, 0), 71, 155},
+	{RTE_IPV6(0xe40b, 0x9075, 0xcec0, 0x7619, 0x8d4e, 0x0469, 0, 0), 96, 142},
+	{RTE_IPV6(0xc343, 0xc2e5, 0x0e35, 0x8107, 0x1ed0, 0x2664, 0xb63b, 0), 112, 2},
+	{RTE_IPV6(0x194c, 0, 0, 0, 0, 0, 0, 0), 19, 59},
+	{RTE_IPV6(0xa000, 0, 0, 0, 0, 0, 0, 0), 4, 112},
+	{RTE_IPV6(0x1acb, 0xd998, 0x10bb, 0, 0, 0, 0, 0), 48, 166},
+	{RTE_IPV6(0xfad5, 0x0eeb, 0x6eab, 0xae17, 0x6680, 0, 0, 0), 73, 62},
+	{RTE_IPV6(0xafe6, 0xa00d, 0xbb11, 0, 0, 0, 0, 0), 50, 176},
+	{RTE_IPV6(0x5c9b, 0x9c5d, 0xbf49, 0x1c52, 0xbb81, 0x3905, 0x1000, 0), 100, 6},
+	{RTE_IPV6(0x2dcb, 0x0308, 0, 0, 0, 0, 0, 0), 29, 26},
+	{RTE_IPV6(0x7800, 0, 0, 0, 0, 0, 0, 0), 5, 6},
+	{RTE_IPV6(0xd800, 0, 0, 0, 0, 0, 0, 0), 5, 13},
+	{RTE_IPV6(0x87d7, 0x0047, 0x1800, 0, 0, 0, 0, 0), 37, 41},
+	{RTE_IPV6(0xdd95, 0x0128, 0x7000, 0, 0, 0, 0, 0), 36, 135},
+	{RTE_IPV6(0x5f8f, 0xffc2, 0x029d, 0xbf71, 0x0ae5, 0xcc38, 0, 0), 93, 171},
+	{RTE_IPV6(0xcad4, 0x6000, 0, 0, 0, 0, 0, 0), 19, 20},
+	{RTE_IPV6(0x93cb, 0xee78, 0xc217, 0x193a, 0xd0b1, 0xa900, 0, 0), 89, 119},
+	{RTE_IPV6(0x89aa, 0x71fc, 0xd7c2, 0xe092, 0xe957, 0x56c0, 0x1a2e, 0), 112, 49},
+	{RTE_IPV6(0xe000, 0, 0, 0, 0, 0, 0, 0), 4, 141},
+	{RTE_IPV6(0xfa5a, 0xf1ae, 0xa348, 0, 0, 0, 0, 0), 47, 132},
+	{RTE_IPV6(0x42be, 0xca90, 0x7a56, 0x1667, 0x6ba4, 0x3936, 0xe480, 0), 105, 176},
+	{RTE_IPV6(0x4c40, 0, 0, 0, 0, 0, 0, 0), 12, 186},
+	{RTE_IPV6(0x78f6, 0x0134, 0xbba3, 0x4e69, 0xe000, 0, 0, 0), 67, 93},
+	{RTE_IPV6(0x89f2, 0x8847, 0x620a, 0x3561, 0xa055, 0x847f, 0xb9de, 0), 111, 242},
+	{RTE_IPV6(0xff85, 0xb500, 0, 0, 0, 0, 0, 0), 24, 163},
+	{RTE_IPV6(0x80b1, 0x5c9b, 0x5bc0, 0, 0, 0, 0, 0), 42, 184},
+	{RTE_IPV6(0x2d78, 0xbac0, 0xf0c7, 0xb25f, 0x2000, 0, 0, 0), 68, 188},
+	{RTE_IPV6(0x9762, 0x67fe, 0x5a06, 0x0a6d, 0x0e9e, 0x451d, 0x8ced, 0x28e8), 126, 193},
+	{RTE_IPV6(0x94a4, 0x5155, 0x4c0e, 0x5440, 0x59b0, 0, 0, 0), 78, 63},
+	{RTE_IPV6(0x91bb, 0xa588, 0x581e, 0x6bbf, 0xcd78, 0x77d8, 0x9e7b, 0x4000), 115, 160},
+	{RTE_IPV6(0x4e78, 0x1cf3, 0xd8b4, 0x5713, 0xfd10, 0x6e21, 0xe418, 0xe800), 117, 251},
+	{RTE_IPV6(0x4a06, 0xa6a6, 0xb79d, 0x6054, 0x9700, 0, 0, 0), 72, 228},
+	{RTE_IPV6(0x5960, 0x04dd, 0xd6fd, 0x3a31, 0x0900, 0, 0, 0), 72, 168},
+	{RTE_IPV6(0x6109, 0x4000, 0, 0, 0, 0, 0, 0), 18, 194},
+	{RTE_IPV6(0xd5d7, 0x2dc8, 0xaa78, 0, 0, 0, 0, 0), 47, 166},
+	{RTE_IPV6(0x050e, 0x5c00, 0x1cf5, 0x82ca, 0x2028, 0xcf4d, 0xa6aa, 0xf640), 122, 210},
+	{RTE_IPV6(0x4d2d, 0x2b47, 0xca00, 0x9d92, 0x3b5b, 0xe100, 0, 0), 89, 254},
+	{RTE_IPV6(0x65ae, 0x5ea8, 0xa2ab, 0x470c, 0x10e0, 0, 0, 0), 75, 49},
+	{RTE_IPV6(0x3a11, 0xbbc2, 0x5749, 0xd767, 0xb40c, 0x2842, 0, 0), 96, 95},
+	{RTE_IPV6(0xa05b, 0x4451, 0x8000, 0, 0, 0, 0, 0), 33, 193},
+	{RTE_IPV6(0x5e70, 0xf90d, 0xa7f5, 0x6540, 0, 0, 0, 0), 58, 155},
+	{RTE_IPV6(0xecc2, 0, 0, 0, 0, 0, 0, 0), 15, 133},
+	{RTE_IPV6(0xa8f3, 0x67dd, 0x7800, 0, 0, 0, 0, 0), 38, 10},
+	{RTE_IPV6(0x56c2, 0xdabc, 0x8000, 0, 0, 0, 0, 0), 33, 31},
+	{RTE_IPV6(0xe803, 0x8643, 0x3fc4, 0x560e, 0xaaf3, 0x4d86, 0xbb8c, 0x4812), 127, 98},
+	{RTE_IPV6(0x37fd, 0x13c9, 0xc747, 0xe5da, 0x3640, 0x0ca2, 0, 0), 96, 22},
+	{RTE_IPV6(0x8e22, 0x2000, 0, 0, 0, 0, 0, 0), 20, 214},
+	{RTE_IPV6(0xd510, 0xd032, 0x6421, 0xc000, 0, 0, 0, 0), 50, 217},
+	{RTE_IPV6(0x75ed, 0x84b9, 0xb8f6, 0x4f2a, 0x6762, 0xa2f3, 0x8000, 0), 98, 102},
+	{RTE_IPV6(0x7819, 0xd6de, 0x3d9d, 0xcb66, 0x0392, 0xc000, 0, 0), 83, 169},
+	{RTE_IPV6(0xde2e, 0xfe40, 0, 0, 0, 0, 0, 0), 27, 152},
+	{RTE_IPV6(0xfe46, 0x9eab, 0x0bf5, 0xdf61, 0x4611, 0x1bc0, 0xba00, 0), 103, 214},
+	{RTE_IPV6(0xc080, 0xe411, 0x4414, 0x2c1f, 0x3422, 0xd401, 0xe000, 0), 99, 178},
+	{RTE_IPV6(0xede5, 0xcb08, 0x79b0, 0, 0, 0, 0, 0), 45, 164},
+	{RTE_IPV6(0x0600, 0, 0, 0, 0, 0, 0, 0), 7, 15},
+	{RTE_IPV6(0x47c5, 0xfb7a, 0x8ae8, 0x0cf1, 0x74f0, 0, 0, 0), 76, 94},
+	{RTE_IPV6(0x12f1, 0x87d2, 0xe936, 0x79b9, 0x0400, 0, 0, 0), 70, 239},
+	{RTE_IPV6(0x2032, 0xd53f, 0x49d9, 0xb415, 0xbb80, 0, 0, 0), 73, 82},
+	{RTE_IPV6(0xcba6, 0xe949, 0x5cb6, 0xd400, 0, 0, 0, 0), 55, 54},
+	{RTE_IPV6(0x38a2, 0x7e04, 0x12c3, 0xc040, 0xa49c, 0x77c4, 0x4000, 0), 98, 47},
+	{RTE_IPV6(0x7857, 0x5188, 0xb4b3, 0x4494, 0xf326, 0x5000, 0, 0), 84, 214},
+	{RTE_IPV6(0x40f4, 0xc132, 0x3000, 0, 0, 0, 0, 0), 37, 215},
+	{RTE_IPV6(0x5ba8, 0xfd9e, 0x8353, 0x9fa3, 0x71a9, 0x7000, 0, 0), 84, 153},
+	{RTE_IPV6(0x9f67, 0x6684, 0x6f2e, 0x124d, 0x240f, 0x8921, 0xb11f, 0xf3c0), 122, 245},
+	{RTE_IPV6(0x7b00, 0, 0, 0, 0, 0, 0, 0), 8, 118},
+	{RTE_IPV6(0x4351, 0xe2be, 0x074f, 0x47fa, 0x9bf5, 0x2c51, 0xd7d5, 0xabe0), 123, 128},
+	{RTE_IPV6(0x6700, 0, 0, 0, 0, 0, 0, 0), 8, 7},
+	{RTE_IPV6(0xf62c, 0xa8c8, 0xc6ee, 0x34c4, 0x7d73, 0, 0, 0), 80, 152},
+	{RTE_IPV6(0xcd0e, 0xbafc, 0xefd5, 0x3b77, 0x6925, 0x8cd1, 0x04e7, 0), 114, 248},
+	{RTE_IPV6(0x465b, 0xfe6a, 0x5e47, 0xaa13, 0x9ef2, 0xc000, 0, 0), 85, 143},
+	{RTE_IPV6(0xfa56, 0xe9b8, 0, 0, 0, 0, 0, 0), 30, 159},
+	{RTE_IPV6(0x7ade, 0x0200, 0, 0, 0, 0, 0, 0), 24, 11},
+	{RTE_IPV6(0x1be0, 0xeb46, 0, 0, 0, 0, 0, 0), 31, 110},
+	{RTE_IPV6(0xef64, 0xe003, 0x2e7f, 0x96fb, 0xcc78, 0xe440, 0, 0), 97, 181},
+	{RTE_IPV6(0x9073, 0xb6ce, 0x920d, 0x156f, 0x2546, 0xb381, 0xad52, 0x5d80), 121, 4},
+	{RTE_IPV6(0x49be, 0x39f3, 0x3133, 0x0fd1, 0, 0, 0, 0), 67, 101},
+	{RTE_IPV6(0x1240, 0, 0, 0, 0, 0, 0, 0), 11, 38},
+	{RTE_IPV6(0x1725, 0xecb1, 0xba07, 0xd187, 0x722c, 0, 0, 0), 78, 57},
+	{RTE_IPV6(0xc830, 0, 0, 0, 0, 0, 0, 0), 17, 142},
+	{RTE_IPV6(0xb5ff, 0x9900, 0, 0, 0, 0, 0, 0), 24, 184},
+	{RTE_IPV6(0x87a8, 0x0680, 0, 0, 0, 0, 0, 0), 27, 91},
+	{RTE_IPV6(0xc8e0, 0x21f5, 0x7800, 0, 0, 0, 0, 0), 41, 224},
+	{RTE_IPV6(0x466f, 0x0a3e, 0xc8e0, 0x26cc, 0x0ea4, 0, 0, 0), 78, 114},
+	{RTE_IPV6(0x9e85, 0xfc12, 0xf20c, 0x103c, 0x0534, 0xfbb3, 0x26eb, 0x0c00), 118, 184},
+	{RTE_IPV6(0x0217, 0x7430, 0, 0, 0, 0, 0, 0), 28, 215},
+	{RTE_IPV6(0x2119, 0xaa4a, 0xd786, 0x97b5, 0xafe8, 0x149b, 0xbdf2, 0x0d00), 120, 167},
+	{RTE_IPV6(0xa0ba, 0xdab7, 0xa754, 0x3b98, 0x0d89, 0x5080, 0, 0), 89, 233},
+	{RTE_IPV6(0x208d, 0xc400, 0, 0, 0, 0, 0, 0), 29, 101},
+	{RTE_IPV6(0xcf18, 0xcae2, 0xbf88, 0x4e7c, 0xa000, 0, 0, 0), 67, 139},
+	{RTE_IPV6(0xd2ad, 0xac1b, 0xc539, 0x7292, 0xa920, 0, 0, 0), 79, 32},
+	{RTE_IPV6(0x5f71, 0x0c7b, 0, 0, 0, 0, 0, 0), 32, 57},
+	{RTE_IPV6(0x816c, 0xba1c, 0x13e5, 0x6086, 0xc7fe, 0xc740, 0, 0), 91, 151},
+	{RTE_IPV6(0x67e2, 0x267b, 0x23c7, 0, 0, 0, 0, 0), 49, 0},
+	{RTE_IPV6(0x2975, 0x2b23, 0xd073, 0x4940, 0, 0, 0, 0), 63, 227},
+	{RTE_IPV6(0x2adc, 0x3d22, 0xc7b7, 0x2a10, 0xdf87, 0x0087, 0xd596, 0x6400), 118, 124},
+	{RTE_IPV6(0xa5e3, 0x60f3, 0x70ab, 0x756a, 0x3225, 0x523c, 0x5000, 0), 104, 228},
+	{RTE_IPV6(0x9e3c, 0x6f60, 0, 0, 0, 0, 0, 0), 27, 64},
+	{RTE_IPV6(0x7c6c, 0x5800, 0, 0, 0, 0, 0, 0), 25, 179},
+	{RTE_IPV6(0xe844, 0x849f, 0x9c67, 0x5fbe, 0x4c00, 0, 0, 0), 70, 107},
+	{RTE_IPV6(0x464d, 0xf0d1, 0x483f, 0x3f2d, 0x7d4f, 0x4d29, 0x0d00, 0), 104, 206},
+	{RTE_IPV6(0x92fe, 0x0705, 0x44f0, 0x43ed, 0x7000, 0, 0, 0), 68, 95},
+	{RTE_IPV6(0xa2df, 0x751b, 0x029c, 0x5eaa, 0x9d72, 0xa232, 0, 0), 96, 219},
+	{RTE_IPV6(0xa13e, 0xbf44, 0xef49, 0x6425, 0xa8fe, 0x8bca, 0xfc41, 0x4a00), 119, 138},
+	{RTE_IPV6(0xf87a, 0x7351, 0x0f9e, 0x8800, 0, 0, 0, 0), 53, 84},
+	{RTE_IPV6(0x0880, 0, 0, 0, 0, 0, 0, 0), 11, 161},
+	{RTE_IPV6(0x8e60, 0x6985, 0xfb39, 0x8000, 0, 0, 0, 0), 52, 25},
+	{RTE_IPV6(0x8ac4, 0x8b83, 0xe95d, 0x41f2, 0x56a9, 0x0748, 0x5280, 0), 107, 113},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 2, 46},
+	{RTE_IPV6(0xaf97, 0x4bee, 0x1a0c, 0x64ba, 0, 0, 0, 0), 63, 72},
+	{RTE_IPV6(0x52cd, 0xd3b0, 0xaa4f, 0x3999, 0xa1da, 0x2030, 0, 0), 93, 230},
+	{RTE_IPV6(0xe37b, 0xe84a, 0xecca, 0xd379, 0xc808, 0x3bbd, 0x51db, 0x9000), 117, 142},
+	{RTE_IPV6(0xcdc4, 0x595a, 0x6780, 0, 0, 0, 0, 0), 41, 134},
+	{RTE_IPV6(0x3f91, 0x177f, 0x66d8, 0x3124, 0xa8a4, 0x3b85, 0x1292, 0), 112, 100},
+	{RTE_IPV6(0xd548, 0x9a10, 0xe6ec, 0xdacb, 0xdf33, 0x1ffb, 0x6740, 0), 109, 45},
+	{RTE_IPV6(0x7e94, 0xe898, 0, 0, 0, 0, 0, 0), 30, 219},
+	{RTE_IPV6(0xa048, 0, 0, 0, 0, 0, 0, 0), 16, 52},
+	{RTE_IPV6(0x8926, 0x9214, 0x63bc, 0x537b, 0x9f9f, 0x4000, 0, 0), 83, 240},
+	{RTE_IPV6(0x7be4, 0x242c, 0xf21d, 0x33e4, 0x8c3c, 0xed00, 0, 0), 90, 13},
+	{RTE_IPV6(0xa3a9, 0x1959, 0xbe72, 0xa59e, 0x8cd2, 0xc000, 0, 0), 84, 191},
+	{RTE_IPV6(0xe126, 0x4659, 0xdaec, 0x3c05, 0x45a3, 0xf832, 0xa340, 0), 106, 95},
+	{RTE_IPV6(0x5b5e, 0x2430, 0, 0, 0, 0, 0, 0), 28, 65},
+	{RTE_IPV6(0xd1ee, 0x6e00, 0x0298, 0, 0, 0, 0, 0), 45, 195},
+	{RTE_IPV6(0x3911, 0xe0a4, 0x455f, 0x8aac, 0x6f37, 0xefa7, 0xa000, 0), 103, 21},
+	{RTE_IPV6(0x7000, 0, 0, 0, 0, 0, 0, 0), 4, 114},
+	{RTE_IPV6(0x6660, 0xdf1c, 0, 0, 0, 0, 0, 0), 31, 92},
+	{RTE_IPV6(0x89cc, 0x964b, 0xc100, 0, 0, 0, 0, 0), 42, 237},
+	{RTE_IPV6(0x8838, 0xfcf0, 0x5530, 0xf8e7, 0x1131, 0x2fee, 0x0fe9, 0x9fb8), 125, 172},
+	{RTE_IPV6(0x391f, 0x847b, 0xeaff, 0x2552, 0xa7cc, 0x259e, 0x8000, 0), 98, 116},
+	{RTE_IPV6(0x37c6, 0x8bdb, 0xa19c, 0x8c00, 0, 0, 0, 0), 55, 54},
+	{RTE_IPV6(0x2c00, 0, 0, 0, 0, 0, 0, 0), 8, 203},
+	{RTE_IPV6(0x3526, 0, 0, 0, 0, 0, 0, 0), 16, 74},
+	{RTE_IPV6(0xe33e, 0x6bec, 0x769c, 0x3c22, 0x1fb3, 0x4cdd, 0, 0), 96, 220},
+	{RTE_IPV6(0x6928, 0xf0d8, 0x5b3d, 0x1380, 0xe000, 0, 0, 0), 67, 219},
+	{RTE_IPV6(0x6000, 0, 0, 0, 0, 0, 0, 0), 3, 179},
+	{RTE_IPV6(0x768e, 0xfbf9, 0x8069, 0x7110, 0, 0, 0, 0), 61, 194},
+	{RTE_IPV6(0x6546, 0xc4ee, 0, 0, 0, 0, 0, 0), 32, 187},
+	{RTE_IPV6(0xf5ad, 0xa5b1, 0xc8a1, 0x4000, 0, 0, 0, 0), 50, 79},
+	{RTE_IPV6(0x00c6, 0, 0, 0, 0, 0, 0, 0), 19, 87},
+	{RTE_IPV6(0x5c00, 0, 0, 0, 0, 0, 0, 0), 8, 126},
+	{RTE_IPV6(0x7d00, 0, 0, 0, 0, 0, 0, 0), 11, 106},
+	{RTE_IPV6(0x383b, 0x2352, 0x6548, 0, 0, 0, 0, 0), 50, 96},
+	{RTE_IPV6(0xb848, 0x4dfb, 0x08a6, 0, 0, 0, 0, 0), 47, 45},
+	{RTE_IPV6(0x8f4a, 0x84cd, 0xdaf7, 0x1ea0, 0x91c7, 0x8a0c, 0x59dc, 0), 110, 8},
+	{RTE_IPV6(0x1eb2, 0x6fe1, 0x494f, 0xad34, 0, 0, 0, 0), 62, 226},
+	{RTE_IPV6(0xe030, 0x9ae7, 0x2000, 0, 0, 0, 0, 0), 36, 222},
+	{RTE_IPV6(0x7b90, 0xaa8f, 0x55a9, 0x82f5, 0xd600, 0, 0, 0), 71, 218},
+	{RTE_IPV6(0xa6e0, 0xd464, 0x9537, 0x23d2, 0xf66c, 0x29f5, 0x7fae, 0x8000), 116, 59},
+	{RTE_IPV6(0x4b00, 0, 0, 0, 0, 0, 0, 0), 8, 80},
+	{RTE_IPV6(0xc580, 0xbe57, 0x2f35, 0x5c40, 0, 0, 0, 0), 58, 177},
+	{RTE_IPV6(0xf90a, 0x4cd9, 0xe114, 0x7ccd, 0x2c9f, 0xbe08, 0, 0), 98, 44},
+	{RTE_IPV6(0xb4e2, 0x00a7, 0x89e8, 0xae78, 0x715f, 0x16b8, 0, 0), 93, 206},
+	{RTE_IPV6(0x7b99, 0x66c0, 0, 0, 0, 0, 0, 0), 27, 64},
+	{RTE_IPV6(0x0590, 0xce9e, 0xefbd, 0xab78, 0x452e, 0x80ed, 0, 0), 96, 236},
+	{RTE_IPV6(0x9feb, 0x3c00, 0, 0, 0, 0, 0, 0), 22, 101},
+	{RTE_IPV6(0x2ac2, 0x9600, 0, 0, 0, 0, 0, 0), 26, 49},
+	{RTE_IPV6(0xcd60, 0, 0, 0, 0, 0, 0, 0), 11, 179},
+	{RTE_IPV6(0x1341, 0x8d14, 0x7f4d, 0x46cd, 0x9773, 0x9d17, 0x7680, 0), 109, 112},
+	{RTE_IPV6(0x600b, 0xd628, 0xf5fb, 0x3d40, 0x80f1, 0xb7b7, 0, 0), 96, 31},
+	{RTE_IPV6(0x7804, 0xeb70, 0x2280, 0, 0, 0, 0, 0), 41, 111},
+	{RTE_IPV6(0x6e7f, 0xcf4c, 0x6494, 0x82ce, 0xf902, 0x6800, 0, 0), 86, 65},
+	{RTE_IPV6(0xe2be, 0xbff9, 0xad60, 0x7fc8, 0x3e14, 0, 0, 0), 78, 222},
+	{RTE_IPV6(0x5958, 0xb60e, 0x4e7a, 0xd5c0, 0, 0, 0, 0), 58, 4},
+	{RTE_IPV6(0xa75e, 0xa3e3, 0x1c6f, 0x7567, 0xe000, 0, 0, 0), 67, 67},
+	{RTE_IPV6(0x39dc, 0x3574, 0xf3b8, 0xf286, 0x1046, 0x533d, 0xa180, 0), 109, 197},
+	{RTE_IPV6(0x3feb, 0x1400, 0, 0, 0, 0, 0, 0), 22, 121},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 1, 167},
+	{RTE_IPV6(0x0f9f, 0x2aa7, 0x4c00, 0, 0, 0, 0, 0), 38, 140},
+	{RTE_IPV6(0xd8fc, 0x7128, 0xef2e, 0xac30, 0x67fa, 0x52b3, 0x8840, 0), 106, 193},
+	{RTE_IPV6(0x9e93, 0x102c, 0x7c38, 0x2c30, 0x8a40, 0xa900, 0, 0), 90, 47},
+	{RTE_IPV6(0xeeee, 0x6000, 0, 0, 0, 0, 0, 0), 21, 187},
+	{RTE_IPV6(0x3f9f, 0xb1a2, 0x6ad4, 0xac00, 0, 0, 0, 0), 56, 102},
+	{RTE_IPV6(0x3b28, 0xfcb9, 0xbbd8, 0, 0, 0, 0, 0), 46, 237},
+	{RTE_IPV6(0x02da, 0x0b44, 0xadc4, 0x10df, 0x0212, 0x7ad7, 0x9a00, 0), 103, 237},
+	{RTE_IPV6(0x0309, 0xce49, 0x6cc4, 0xb777, 0x8da2, 0x0ab4, 0x7320, 0), 107, 115},
+	{RTE_IPV6(0x11e3, 0xd092, 0x3fc9, 0x49ef, 0x1d4f, 0x5000, 0, 0), 84, 217},
+	{RTE_IPV6(0x73b4, 0xb0f1, 0x34d1, 0x0640, 0xbd4c, 0, 0, 0), 79, 21},
+	{RTE_IPV6(0xbf58, 0x62f5, 0x5b2e, 0x89fe, 0xaa50, 0x0b37, 0xd41c, 0x8000), 113, 3},
+	{RTE_IPV6(0x618d, 0xabaf, 0x16e9, 0, 0, 0, 0, 0), 48, 62},
+	{RTE_IPV6(0x20cc, 0x66bf, 0xa4f2, 0, 0, 0, 0, 0), 47, 80},
+	{RTE_IPV6(0x1d85, 0xd2fc, 0x7c42, 0xa000, 0, 0, 0, 0), 51, 184},
+	{RTE_IPV6(0xcfb3, 0x3690, 0x7443, 0x1d40, 0x0dc7, 0, 0, 0), 80, 197},
+	{RTE_IPV6(0x81d8, 0, 0, 0, 0, 0, 0, 0), 13, 63},
+	{RTE_IPV6(0x3298, 0xf98f, 0xaeea, 0xf030, 0x9eff, 0x5069, 0, 0), 99, 62},
+	{RTE_IPV6(0x69d0, 0x5fda, 0x2c0b, 0x5786, 0x6d12, 0x8a42, 0x1145, 0x8000), 114, 231},
+	{RTE_IPV6(0x974f, 0x9edc, 0x7a65, 0xd2a4, 0x4000, 0, 0, 0), 67, 158},
+	{RTE_IPV6(0xec61, 0x579b, 0xfe89, 0x7ad0, 0xa8c9, 0xc276, 0xe000, 0), 101, 118},
+	{RTE_IPV6(0x0ee5, 0xc1f8, 0, 0, 0, 0, 0, 0), 30, 237},
+	{RTE_IPV6(0x2e9a, 0x3250, 0x5c93, 0x9e56, 0x0170, 0, 0, 0), 79, 15},
+	{RTE_IPV6(0x5883, 0x1554, 0x3e56, 0x076e, 0x8efb, 0xf26e, 0xc2af, 0xf700), 122, 84},
+	{RTE_IPV6(0xe5d8, 0x6f5c, 0xad20, 0x3f46, 0x2454, 0x064a, 0x88a6, 0x2600), 119, 205},
+	{RTE_IPV6(0x7993, 0xd8f5, 0x25bd, 0x923f, 0x914a, 0x8000, 0, 0), 82, 220},
+	{RTE_IPV6(0x2c1a, 0xfe0b, 0xb500, 0, 0, 0, 0, 0), 40, 42},
+	{RTE_IPV6(0xd172, 0x61f9, 0xe39f, 0xe000, 0, 0, 0, 0), 51, 144},
+	{RTE_IPV6(0xb8f4, 0x2b75, 0x3800, 0, 0, 0, 0, 0), 37, 74},
+	{RTE_IPV6(0x3c51, 0x8000, 0, 0, 0, 0, 0, 0), 19, 89},
+	{RTE_IPV6(0x1228, 0x1571, 0xe25b, 0xc358, 0xa113, 0x8e00, 0, 0), 88, 77},
+	{RTE_IPV6(0x3900, 0xd49e, 0x3833, 0x6cc6, 0x3b05, 0x89c4, 0, 0), 94, 2},
+	{RTE_IPV6(0xa8fc, 0, 0, 0, 0, 0, 0, 0), 14, 75},
+	{RTE_IPV6(0x40b5, 0xfe67, 0x01e6, 0x75c7, 0x8000, 0, 0, 0), 65, 18},
+	{RTE_IPV6(0xd430, 0xd67f, 0x4eb0, 0, 0, 0, 0, 0), 46, 246},
+	{RTE_IPV6(0x9bb9, 0xeca3, 0xcc31, 0x8178, 0xb72f, 0x0af3, 0x415c, 0xc000), 114, 10},
+	{RTE_IPV6(0x5ec8, 0, 0, 0, 0, 0, 0, 0), 14, 207},
+	{RTE_IPV6(0x13d2, 0x8871, 0x494f, 0x84c4, 0xe000, 0, 0, 0), 68, 41},
+	{RTE_IPV6(0x18cb, 0xf6f2, 0xf1df, 0x96ed, 0xd5ca, 0x0b80, 0, 0), 89, 102},
+	{RTE_IPV6(0x733b, 0xabdd, 0xacb5, 0xaa43, 0x73cd, 0x2c6b, 0xa243, 0x3800), 118, 118},
+	{RTE_IPV6(0xfa80, 0, 0, 0, 0, 0, 0, 0), 10, 146},
+	{RTE_IPV6(0xcbf0, 0x1c9e, 0xb60c, 0x56b6, 0x8e2f, 0x8f39, 0xef00, 0), 104, 122},
+	{RTE_IPV6(0xc4da, 0x6d34, 0x0200, 0x4099, 0x22fa, 0xf0b9, 0x7500, 0), 107, 6},
+	{RTE_IPV6(0x8983, 0xbf28, 0x48d1, 0x4a40, 0, 0, 0, 0), 58, 18},
+	{RTE_IPV6(0xec7e, 0xa725, 0xb914, 0x22cf, 0x4c00, 0, 0, 0), 70, 83},
+	{RTE_IPV6(0x81c0, 0xf589, 0xfb34, 0x4b44, 0x5170, 0x9285, 0x4000, 0), 99, 90},
+	{RTE_IPV6(0x071f, 0x9413, 0, 0, 0, 0, 0, 0), 32, 140},
+	{RTE_IPV6(0x7000, 0, 0, 0, 0, 0, 0, 0), 4, 242},
+	{RTE_IPV6(0xa732, 0xcab3, 0x4a92, 0xc000, 0, 0, 0, 0), 50, 31},
+	{RTE_IPV6(0x2cbc, 0xbafa, 0xe547, 0x1c76, 0x23fd, 0xf5bf, 0xc712, 0), 111, 9},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 1, 230},
+	{RTE_IPV6(0x9ca3, 0xd7af, 0x4750, 0, 0, 0, 0, 0), 47, 50},
+	{RTE_IPV6(0x4318, 0x97c6, 0xf280, 0, 0, 0, 0, 0), 41, 34},
+	{RTE_IPV6(0x866b, 0x8000, 0, 0, 0, 0, 0, 0), 17, 11},
+	{RTE_IPV6(0x2300, 0, 0, 0, 0, 0, 0, 0), 8, 71},
+	{RTE_IPV6(0x2ec4, 0x5400, 0, 0, 0, 0, 0, 0), 22, 146},
+	{RTE_IPV6(0x52ac, 0x081a, 0x9a22, 0x7dbc, 0x0595, 0x9f2c, 0x4ede, 0xecb0), 124, 249},
+	{RTE_IPV6(0x4e9d, 0x4f46, 0xfc00, 0, 0, 0, 0, 0), 39, 143},
+	{RTE_IPV6(0xe705, 0xd2f7, 0xc605, 0x9dbf, 0xcee1, 0x958e, 0xcf28, 0), 110, 17},
+	{RTE_IPV6(0x26fe, 0xebc7, 0xbf3c, 0x2b9f, 0xbef3, 0xcbb9, 0xb8da, 0x8400), 119, 60},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 1, 162},
+	{RTE_IPV6(0x5ff0, 0, 0, 0, 0, 0, 0, 0), 12, 5},
+	{RTE_IPV6(0x1180, 0xf4b2, 0xa04e, 0x535c, 0, 0, 0, 0), 62, 139},
+	{RTE_IPV6(0x1266, 0x3efb, 0x2c00, 0, 0, 0, 0, 0), 39, 8},
+	{RTE_IPV6(0x1e4b, 0x6c28, 0xe7a6, 0xe9dc, 0xa3b0, 0xfcd2, 0x3c1e, 0x8000), 114, 246},
+	{RTE_IPV6(0x1203, 0xcf40, 0x1980, 0, 0, 0, 0, 0), 42, 171},
+	{RTE_IPV6(0x3453, 0xeb3d, 0xa4ec, 0x53ad, 0x8f69, 0x0e00, 0, 0), 88, 206},
+	{RTE_IPV6(0xa6af, 0xbad0, 0, 0, 0, 0, 0, 0), 28, 163},
+	{RTE_IPV6(0xdd9a, 0x5262, 0x297e, 0x5534, 0, 0, 0, 0), 62, 166},
+	{RTE_IPV6(0x5e54, 0xb678, 0xcce8, 0x4000, 0, 0, 0, 0), 51, 128},
+	{RTE_IPV6(0x1bae, 0xe3e4, 0, 0, 0, 0, 0, 0), 31, 59},
+	{RTE_IPV6(0xda0c, 0x049c, 0, 0, 0, 0, 0, 0), 32, 179},
+	{RTE_IPV6(0x0905, 0xbec3, 0x3cd8, 0x5096, 0x8075, 0x5680, 0x8070, 0x62d0), 124, 87},
+	{RTE_IPV6(0x07e2, 0x6870, 0xd409, 0xac7c, 0xd179, 0xaae5, 0x2cb2, 0x8000), 114, 29},
+	{RTE_IPV6(0x2f47, 0xae4c, 0x3453, 0x1712, 0x6a30, 0x3820, 0, 0), 91, 184},
+	{RTE_IPV6(0x33a8, 0, 0, 0, 0, 0, 0, 0), 14, 45},
+	{RTE_IPV6(0x1cb6, 0xa77c, 0x1c16, 0x5000, 0, 0, 0, 0), 55, 144},
+	{RTE_IPV6(0x223d, 0x0e33, 0xfd11, 0x13aa, 0x31ce, 0xbccf, 0xf7a7, 0xc000), 114, 119},
+	{RTE_IPV6(0x02eb, 0x120e, 0xc342, 0xed1e, 0, 0, 0, 0), 64, 113},
+	{RTE_IPV6(0x33b6, 0x8e85, 0x7f60, 0x9f84, 0x63a1, 0x4000, 0, 0), 82, 50},
+	{RTE_IPV6(0xaa91, 0xe67b, 0xd7bd, 0x4900, 0, 0, 0, 0), 56, 207},
+	{RTE_IPV6(0x97a6, 0x2000, 0, 0, 0, 0, 0, 0), 21, 3},
+	{RTE_IPV6(0x108d, 0xc481, 0x84cf, 0x0700, 0, 0, 0, 0), 58, 13},
+	{RTE_IPV6(0xcd19, 0xb8bf, 0xc9ce, 0x6de0, 0, 0, 0, 0), 59, 42},
+	{RTE_IPV6(0x3072, 0x2167, 0xf7ff, 0x4000, 0, 0, 0, 0), 50, 31},
+	{RTE_IPV6(0xb39c, 0x7792, 0x7d15, 0x2a92, 0xedd5, 0xbf84, 0, 0), 94, 30},
+	{RTE_IPV6(0xb381, 0xba90, 0, 0, 0, 0, 0, 0), 29, 94},
+	{RTE_IPV6(0x11b3, 0xd9bc, 0x80d4, 0x0404, 0x9800, 0, 0, 0), 71, 190},
+	{RTE_IPV6(0x843f, 0x4a59, 0xd140, 0x3fc0, 0, 0, 0, 0), 59, 238},
+	{RTE_IPV6(0x1032, 0xf858, 0, 0, 0, 0, 0, 0), 30, 20},
+	{RTE_IPV6(0xbd60, 0x3a35, 0xbfeb, 0x4000, 0, 0, 0, 0), 51, 84},
+	{RTE_IPV6(0x6f62, 0x0641, 0x23c0, 0, 0, 0, 0, 0), 42, 108},
+	{RTE_IPV6(0x76df, 0x53dc, 0x6e7a, 0x1770, 0xb99b, 0x4900, 0, 0), 89, 136},
+	{RTE_IPV6(0xadbf, 0x96c5, 0xcc23, 0xa94f, 0x1fd6, 0xfbf0, 0, 0), 93, 196},
+	{RTE_IPV6(0x1a4c, 0x8130, 0, 0, 0, 0, 0, 0), 28, 67},
+	{RTE_IPV6(0xe760, 0, 0, 0, 0, 0, 0, 0), 12, 104},
+	{RTE_IPV6(0x5dac, 0xdffc, 0xcb00, 0xce00, 0, 0, 0, 0), 55, 15},
+	{RTE_IPV6(0x358e, 0xcb7c, 0x6833, 0xf10c, 0xa111, 0x65f5, 0x786e, 0xc0c7), 128, 237},
+	{RTE_IPV6(0x094d, 0x78c5, 0xc10a, 0xedae, 0xe902, 0xa50b, 0xe52f, 0x9000), 116, 224},
+	{RTE_IPV6(0x63a1, 0xbd58, 0x3000, 0, 0, 0, 0, 0), 36, 179},
+	{RTE_IPV6(0x1208, 0x4c42, 0x02b9, 0xce84, 0xe000, 0, 0, 0), 67, 84},
+	{RTE_IPV6(0xa935, 0x2000, 0, 0, 0, 0, 0, 0), 22, 65},
+	{RTE_IPV6(0x8800, 0, 0, 0, 0, 0, 0, 0), 5, 178},
+	{RTE_IPV6(0x83a2, 0x907c, 0x0c62, 0xf200, 0, 0, 0, 0), 55, 154},
+	{RTE_IPV6(0x4b32, 0x81c0, 0, 0, 0, 0, 0, 0), 27, 106},
+	{RTE_IPV6(0xd4b7, 0x28e1, 0x9888, 0xae5b, 0, 0, 0, 0), 67, 125},
+	{RTE_IPV6(0x9e00, 0, 0, 0, 0, 0, 0, 0), 9, 118},
+	{RTE_IPV6(0x0730, 0x8495, 0xa9d4, 0xc689, 0xca00, 0, 0, 0), 73, 52},
+	{RTE_IPV6(0xadc3, 0x81a3, 0x8df9, 0x2840, 0, 0, 0, 0), 58, 173},
+	{RTE_IPV6(0x6d4f, 0x4bdb, 0xcdb6, 0x16f5, 0xdf11, 0x924e, 0x6d77, 0x8000), 113, 8},
+	{RTE_IPV6(0xaec3, 0x18b6, 0xd7c6, 0xd656, 0x2280, 0, 0, 0), 74, 211},
+	{RTE_IPV6(0x1628, 0x336d, 0x465b, 0x9838, 0, 0, 0, 0), 61, 253},
+	{RTE_IPV6(0xa973, 0xf67e, 0x4176, 0xdbc0, 0, 0, 0, 0), 59, 47},
+	{RTE_IPV6(0x9a25, 0x467c, 0x6b7b, 0xe8f1, 0xa48e, 0x47e2, 0xb67e, 0), 112, 73},
+	{RTE_IPV6(0x066c, 0, 0, 0, 0, 0, 0, 0), 19, 192},
+	{RTE_IPV6(0xd8a7, 0x9e9e, 0xde13, 0x601c, 0x2806, 0x460c, 0x931b, 0x55f0), 128, 55},
+	{RTE_IPV6(0x48de, 0x3445, 0x45ce, 0xa36a, 0xebce, 0x5080, 0, 0), 94, 147},
+	{RTE_IPV6(0x9670, 0x6a38, 0x0ff3, 0x9a61, 0x866e, 0xa014, 0xb790, 0xea08), 125, 86},
+	{RTE_IPV6(0x3aba, 0x6a3a, 0x7cab, 0x3555, 0x2164, 0x4000, 0, 0), 82, 16},
+	{RTE_IPV6(0x07c3, 0x161f, 0x3ed9, 0xd12e, 0x5a31, 0xbd32, 0xa87e, 0), 111, 167},
+	{RTE_IPV6(0x5c2c, 0x9fc6, 0xb95e, 0xe7b1, 0x4000, 0, 0, 0), 67, 148},
+	{RTE_IPV6(0xa96c, 0xbea2, 0x1727, 0x8c00, 0, 0, 0, 0), 54, 66},
+	{RTE_IPV6(0xa105, 0x030b, 0x9e9d, 0xa6d4, 0xf616, 0x8c65, 0x5c00, 0), 104, 70},
+	{RTE_IPV6(0x47bf, 0, 0, 0, 0, 0, 0, 0), 16, 166},
+	{RTE_IPV6(0x3088, 0xc291, 0x3960, 0, 0, 0, 0, 0), 44, 109},
+	{RTE_IPV6(0x9000, 0, 0, 0, 0, 0, 0, 0), 5, 226},
+	{RTE_IPV6(0xdfd1, 0x0a39, 0, 0, 0, 0, 0, 0), 32, 8},
+	{RTE_IPV6(0x9a4f, 0xaa09, 0x2b8b, 0xf9b0, 0xba48, 0xd800, 0, 0), 85, 218},
+	{RTE_IPV6(0x0108, 0x7bcd, 0xa786, 0x8066, 0x0a48, 0, 0, 0), 78, 54},
+	{RTE_IPV6(0x1f69, 0x304d, 0x67bb, 0x6343, 0x6000, 0, 0, 0), 67, 48},
+	{RTE_IPV6(0x0e49, 0x364c, 0xe823, 0x2000, 0, 0, 0, 0), 51, 244},
+	{RTE_IPV6(0x0e6d, 0xfbbe, 0x24fd, 0x6378, 0x5e40, 0, 0, 0), 74, 50},
+	{RTE_IPV6(0x7aaa, 0x0986, 0x7c5b, 0x1800, 0, 0, 0, 0), 54, 173},
+	{RTE_IPV6(0xf60a, 0x5558, 0x52d9, 0x5f38, 0xd8cb, 0xa000, 0, 0), 84, 245},
+	{RTE_IPV6(0x4d64, 0x72cf, 0x96b1, 0x4586, 0x4a83, 0x9375, 0xb140, 0xd280), 121, 54},
+	{RTE_IPV6(0xab7b, 0x168a, 0x84e5, 0xfa51, 0xbae3, 0x921b, 0xaacd, 0x8000), 113, 86},
+	{RTE_IPV6(0x8000, 0, 0, 0, 0, 0, 0, 0), 2, 115},
+	{RTE_IPV6(0x0c23, 0x3200, 0, 0, 0, 0, 0, 0), 24, 144},
+	{RTE_IPV6(0xff7c, 0xb3a5, 0xa9fa, 0x42ab, 0xdf7d, 0xf700, 0, 0), 89, 171},
+	{RTE_IPV6(0xf4eb, 0xd30a, 0xfbff, 0xce06, 0xc60c, 0x3288, 0, 0), 93, 231},
+	{RTE_IPV6(0xdd4d, 0xed29, 0x3221, 0x6718, 0x197f, 0xd000, 0, 0), 88, 34},
+	{RTE_IPV6(0xd845, 0x2f35, 0x7518, 0x3800, 0, 0, 0, 0), 53, 225},
+	{RTE_IPV6(0xb457, 0x19ec, 0x3800, 0, 0, 0, 0, 0), 38, 174},
+	{RTE_IPV6(0x6e20, 0x1822, 0x7485, 0xf580, 0x7b5f, 0x7d7a, 0x6481, 0x8000), 113, 37},
+	{RTE_IPV6(0x1b75, 0xb370, 0x8589, 0x6ec1, 0xf6c9, 0xdb41, 0x38ea, 0x6a80), 121, 39},
+	{RTE_IPV6(0xba75, 0xfc00, 0, 0, 0, 0, 0, 0), 23, 59},
+	{RTE_IPV6(0xf377, 0x3610, 0x8000, 0, 0, 0, 0, 0), 34, 96},
+	{RTE_IPV6(0xa000, 0, 0, 0, 0, 0, 0, 0), 3, 147},
+	{RTE_IPV6(0x4e30, 0x75c8, 0xf576, 0x73f0, 0xaa7d, 0x5467, 0x21a8, 0), 110, 56},
+	{RTE_IPV6(0xc9fd, 0xb8fe, 0x8f51, 0x5f2a, 0xf393, 0x6091, 0x171a, 0), 111, 234},
+	{RTE_IPV6(0x29d7, 0x5488, 0xea00, 0, 0, 0, 0, 0), 46, 199},
+	{RTE_IPV6(0x5bf4, 0x89b8, 0xe75f, 0x870a, 0xb800, 0, 0, 0), 69, 191},
+	{RTE_IPV6(0x711f, 0xb5f5, 0x1500, 0, 0, 0, 0, 0), 40, 235},
+	{RTE_IPV6(0xb5d8, 0xc000, 0, 0, 0, 0, 0, 0), 20, 45},
+	{RTE_IPV6(0x571a, 0x77e5, 0x61ff, 0x092b, 0x2000, 0, 0, 0), 67, 164},
+	{RTE_IPV6(0xcd70, 0x43a3, 0xc494, 0x0569, 0x088a, 0x9003, 0xabd5, 0x9f80), 121, 130},
+	{RTE_IPV6(0x881b, 0xc000, 0, 0, 0, 0, 0, 0), 18, 166},
+	{RTE_IPV6(0x02af, 0x6f00, 0, 0, 0, 0, 0, 0), 24, 140},
+	{RTE_IPV6(0xde83, 0x55da, 0x10e5, 0x2ce6, 0xf34c, 0xfa8b, 0x01cb, 0x6c00), 118, 47},
+	{RTE_IPV6(0x65b4, 0x4d8e, 0xc249, 0xc4f6, 0x6b64, 0xc248, 0xcc7c, 0), 111, 148},
+	{RTE_IPV6(0x6000, 0, 0, 0, 0, 0, 0, 0), 3, 103},
+	{RTE_IPV6(0x2e3e, 0xbf82, 0x6e80, 0xeb3e, 0x4427, 0x3a98, 0xcfcc, 0x6000), 116, 94},
+	{RTE_IPV6(0x6f0b, 0x6000, 0, 0, 0, 0, 0, 0), 19, 85},
+	{RTE_IPV6(0x3a2b, 0x0e5d, 0x66d2, 0x75d0, 0xdeab, 0x8229, 0x1010, 0), 109, 250},
+	{RTE_IPV6(0x8d34, 0, 0, 0, 0, 0, 0, 0), 16, 153},
+	{RTE_IPV6(0xaa99, 0xa0aa, 0x90eb, 0x7a08, 0x6a22, 0x1820, 0x6639, 0x0ca8), 125, 182},
+	{RTE_IPV6(0x2271, 0xa36b, 0x3db1, 0x27ac, 0xf202, 0x8200, 0, 0), 94, 23},
+	{RTE_IPV6(0xdebf, 0xef6e, 0xa2bf, 0xc3b5, 0x5032, 0x55f0, 0x5820, 0), 108, 38},
+	{RTE_IPV6(0xb352, 0xfd97, 0xd400, 0x48fd, 0xaf16, 0x224e, 0x3520, 0), 110, 121},
+	{RTE_IPV6(0x0aa2, 0x142e, 0xa440, 0x5801, 0xcacc, 0x7c00, 0, 0), 87, 146},
+	{RTE_IPV6(0xd263, 0, 0, 0, 0, 0, 0, 0), 16, 138},
+	{RTE_IPV6(0xb7c8, 0x0102, 0x3306, 0x428e, 0x144d, 0x30f4, 0, 0), 94, 149},
+	{RTE_IPV6(0x1d14, 0xe039, 0xcca1, 0x83fe, 0x3585, 0xa300, 0, 0), 88, 232},
+	{RTE_IPV6(0x4b3a, 0xaa34, 0x9250, 0, 0, 0, 0, 0), 45, 255},
+	{RTE_IPV6(0x5c15, 0x0171, 0xb958, 0xa000, 0, 0, 0, 0), 51, 148},
+	{RTE_IPV6(0x67b4, 0xdebb, 0x8175, 0, 0, 0, 0, 0), 48, 117},
+	{RTE_IPV6(0x204c, 0, 0, 0, 0, 0, 0, 0), 14, 237},
+	{RTE_IPV6(0x073c, 0x6000, 0, 0, 0, 0, 0, 0), 19, 113},
+	{RTE_IPV6(0xa77a, 0xcdb9, 0x15c7, 0x0600, 0, 0, 0, 0), 57, 162},
+	{RTE_IPV6(0x154e, 0, 0, 0, 0, 0, 0, 0), 21, 225},
+	{RTE_IPV6(0x5c9f, 0xa7a9, 0x88b0, 0x5fff, 0x5789, 0x7010, 0, 0), 92, 210},
+	{RTE_IPV6(0x5478, 0xa000, 0, 0, 0, 0, 0, 0), 22, 34},
+	{RTE_IPV6(0x7e05, 0x7eb0, 0, 0, 0, 0, 0, 0), 29, 224},
+	{RTE_IPV6(0x043a, 0, 0, 0, 0, 0, 0, 0), 16, 143},
+	{RTE_IPV6(0xef9a, 0xb5b6, 0xbdd3, 0xf435, 0x9000, 0, 0, 0), 68, 216},
+	{RTE_IPV6(0xfebc, 0x8ba7, 0x872f, 0x93ef, 0xbb6a, 0xe49c, 0xeaea, 0x6600), 120, 239},
+	{RTE_IPV6(0xe1a8, 0x8a5c, 0xc1ff, 0x2fe9, 0x0b9a, 0xcd56, 0xd158, 0), 111, 54},
+	{RTE_IPV6(0xdf00, 0, 0, 0, 0, 0, 0, 0), 9, 35},
+	{RTE_IPV6(0xebfc, 0x730a, 0x9768, 0xc1cf, 0x26e4, 0xe5f5, 0x2a0d, 0x6c00), 119, 230},
+	{RTE_IPV6(0x0189, 0x3524, 0xd2b8, 0, 0, 0, 0, 0), 45, 234},
+	{RTE_IPV6(0x95b6, 0x48c5, 0x5ce5, 0x090a, 0xdc80, 0x4813, 0x043a, 0xc000), 115, 70},
+	{RTE_IPV6(0x6949, 0x396c, 0, 0, 0, 0, 0, 0), 32, 246},
+	{RTE_IPV6(0xbd3d, 0xe618, 0xeb52, 0x3a66, 0x616f, 0x79fc, 0x9c5e, 0xbfa6), 127, 217},
+	{RTE_IPV6(0xc16c, 0xe756, 0x8c0e, 0xc004, 0x8750, 0x81a6, 0x9e3d, 0xe614), 128, 201},
+	{RTE_IPV6(0x6e4d, 0, 0, 0, 0, 0, 0, 0), 17, 49},
+	{RTE_IPV6(0x0366, 0x24e7, 0x0ff2, 0x8f00, 0, 0, 0, 0), 57, 2},
+	{RTE_IPV6(0x51bd, 0xdca8, 0xc000, 0, 0, 0, 0, 0), 38, 64},
+	{RTE_IPV6(0xa84b, 0x85b4, 0x5ba5, 0x4de8, 0, 0, 0, 0), 62, 239},
+	{RTE_IPV6(0x6ab3, 0xba6d, 0x51ea, 0xe9a7, 0x65a0, 0x5a66, 0xaeea, 0xd000), 116, 47},
+	{RTE_IPV6(0x2e69, 0xea15, 0x17f7, 0xa921, 0x2f05, 0, 0, 0), 80, 43},
+	{RTE_IPV6(0x9890, 0x648e, 0x8117, 0xe332, 0x4351, 0xf974, 0, 0), 94, 17},
+	{RTE_IPV6(0x6d4a, 0x91c0, 0, 0, 0, 0, 0, 0), 26, 5},
+	{RTE_IPV6(0x64f3, 0x16e6, 0x262c, 0x8056, 0x8439, 0, 0, 0), 81, 240},
+	{RTE_IPV6(0x99fb, 0x7341, 0x68b3, 0, 0, 0, 0, 0), 50, 197},
+	{RTE_IPV6(0x2b71, 0x3ce0, 0x2414, 0x2aa1, 0x18df, 0xc000, 0, 0), 84, 192},
+	{RTE_IPV6(0x3d4d, 0x79b0, 0x8a98, 0, 0, 0, 0, 0), 47, 160},
+	{RTE_IPV6(0x77c2, 0x9231, 0x3bf2, 0x19dc, 0x7a68, 0x5000, 0, 0), 84, 199},
+	{RTE_IPV6(0xfea2, 0x9b2f, 0xbb03, 0x0172, 0x8ebf, 0x982c, 0x901a, 0xca00), 127, 217},
+	{RTE_IPV6(0xb001, 0x722a, 0xbf91, 0x2b01, 0x8d12, 0x4000, 0, 0), 83, 75},
+	{RTE_IPV6(0xaaf4, 0x4384, 0x91a3, 0x4cd5, 0x55ed, 0xf816, 0xcf40, 0), 106, 222},
+	{RTE_IPV6(0x66be, 0x3a20, 0x4b0f, 0x59a3, 0x4007, 0xa800, 0, 0), 85, 39},
+	{RTE_IPV6(0x7caa, 0x232f, 0x98e0, 0, 0, 0, 0, 0), 45, 9},
+	{RTE_IPV6(0xc0dd, 0x14e4, 0, 0, 0, 0, 0, 0), 33, 217},
+	{RTE_IPV6(0xd0b2, 0x8800, 0, 0, 0, 0, 0, 0), 25, 142},
+	{RTE_IPV6(0xbc44, 0x4d1e, 0x4499, 0x66b4, 0, 0, 0, 0), 62, 18},
+	{RTE_IPV6(0x72b2, 0x79bc, 0xcde9, 0x234d, 0x22c5, 0x9eae, 0x6500, 0), 104, 180},
+	{RTE_IPV6(0xc362, 0x430c, 0x0d2b, 0x0600, 0, 0, 0, 0), 56, 205},
+	{RTE_IPV6(0x92be, 0x2ade, 0x0e36, 0x1c80, 0, 0, 0, 0), 57, 251},
+	{RTE_IPV6(0xb9ca, 0x9400, 0, 0, 0, 0, 0, 0), 22, 178},
+	{RTE_IPV6(0x8a1e, 0x815f, 0xe0a1, 0x7880, 0, 0, 0, 0), 59, 198},
+	{RTE_IPV6(0x45b5, 0x05e3, 0x4000, 0, 0, 0, 0, 0), 34, 84},
+	{RTE_IPV6(0x5ab4, 0x00a4, 0xe34b, 0xae77, 0x8000, 0, 0, 0), 66, 128},
+	{RTE_IPV6(0x143c, 0x3a77, 0xf5b1, 0xa2ba, 0x0d70, 0xd3ef, 0x8000, 0), 97, 75},
+	{RTE_IPV6(0x9e7c, 0x9d19, 0xe68b, 0x33d4, 0x4c6d, 0xecd2, 0x3000, 0), 101, 192},
+	{RTE_IPV6(0x7d6c, 0xf224, 0x5e0d, 0x246a, 0x5a33, 0x53d9, 0x8397, 0), 114, 60},
+	{RTE_IPV6(0xdeda, 0xa29e, 0x0f35, 0xbfb2, 0, 0, 0, 0), 63, 169},
+	{RTE_IPV6(0x68ca, 0x7f6d, 0x4910, 0x110c, 0, 0, 0, 0), 62, 10},
+	{RTE_IPV6(0xacab, 0xf61a, 0xb022, 0x1698, 0xf638, 0xad78, 0x693c, 0x5c00), 118, 64},
+	{RTE_IPV6(0xbe16, 0xabce, 0x6dba, 0xb380, 0xfdb6, 0x6cd4, 0xdca7, 0xabb4), 127, 182},
+	{RTE_IPV6(0x77d0, 0, 0, 0, 0, 0, 0, 0), 13, 29},
+	{RTE_IPV6(0xa000, 0, 0, 0, 0, 0, 0, 0), 3, 39},
+	{RTE_IPV6(0xaa90, 0x4002, 0x6ba6, 0x8000, 0, 0, 0, 0), 51, 93},
+	{RTE_IPV6(0xea09, 0x6014, 0x9c9d, 0x0122, 0x5800, 0, 0, 0), 75, 228},
+	{RTE_IPV6(0x93ed, 0x1078, 0xc000, 0, 0, 0, 0, 0), 34, 236},
+	{RTE_IPV6(0xb6bd, 0xa29e, 0xdf5a, 0xad00, 0, 0, 0, 0), 56, 190},
+	{RTE_IPV6(0x7494, 0x8ef0, 0x0afd, 0x8000, 0, 0, 0, 0), 49, 217},
+	{RTE_IPV6(0xd349, 0x8c45, 0xfc1b, 0x4b2e, 0x2506, 0x9320, 0, 0), 93, 74},
+	{RTE_IPV6(0x943d, 0x7831, 0xdc41, 0x9600, 0, 0, 0, 0), 61, 180},
+	{RTE_IPV6(0xac23, 0xcab4, 0x814b, 0xf000, 0, 0, 0, 0), 55, 91},
+	{RTE_IPV6(0xd76d, 0x939d, 0x201c, 0x3400, 0, 0, 0, 0), 54, 230},
+	{RTE_IPV6(0x971a, 0xb670, 0xcddc, 0xa000, 0, 0, 0, 0), 52, 175},
+	{RTE_IPV6(0x495b, 0x5d3d, 0xc403, 0x421a, 0x9560, 0, 0, 0), 75, 171},
+	{RTE_IPV6(0xcba3, 0x34f7, 0x1c77, 0x38df, 0x8a46, 0xae61, 0x4d3b, 0x2e00), 120, 202},
+	{RTE_IPV6(0xfb32, 0xe4b2, 0xcac3, 0, 0, 0, 0, 0), 49, 113},
+	{RTE_IPV6(0xd99f, 0xa4c7, 0x0eed, 0xaab8, 0x64e7, 0x5cde, 0, 0), 96, 187},
+	{RTE_IPV6(0x10a1, 0x55c1, 0xca15, 0x039b, 0x3f74, 0x7ccb, 0x220d, 0xd700), 120, 38},
+	{RTE_IPV6(0x6f34, 0x0a00, 0, 0, 0, 0, 0, 0), 26, 35},
+	{RTE_IPV6(0x450c, 0x7497, 0x2000, 0, 0, 0, 0, 0), 38, 115},
+	{RTE_IPV6(0xbb3c, 0x6128, 0x7065, 0x6000, 0, 0, 0, 0), 51, 18},
+	{RTE_IPV6(0xe6c2, 0x88ff, 0xce00, 0, 0, 0, 0, 0), 39, 34},
+	{RTE_IPV6(0xb3ef, 0xaa6b, 0x030d, 0xd443, 0xb145, 0x0800, 0, 0), 87, 75},
+	{RTE_IPV6(0x0b3a, 0x8259, 0x8000, 0, 0, 0, 0, 0), 33, 232},
+	{RTE_IPV6(0xd9b2, 0x2bcb, 0xea14, 0xeaba, 0x9d58, 0x92c0, 0, 0), 91, 154},
+	{RTE_IPV6(0x06b4, 0xe000, 0, 0, 0, 0, 0, 0), 19, 195},
+	{RTE_IPV6(0x9d9a, 0xda9e, 0x27e0, 0x67e6, 0xa400, 0, 0, 0), 70, 122},
+	{RTE_IPV6(0xe10a, 0x8300, 0, 0, 0, 0, 0, 0), 24, 97},
+	{RTE_IPV6(0x1000, 0, 0, 0, 0, 0, 0, 0), 5, 220},
+	{RTE_IPV6(0xa600, 0, 0, 0, 0, 0, 0, 0), 7, 80},
+	{RTE_IPV6(0x1dbe, 0x83d7, 0xe8f6, 0x29e2, 0x34c0, 0, 0, 0), 77, 133},
+	{RTE_IPV6(0x8a4a, 0xa35d, 0x0c00, 0, 0, 0, 0, 0), 38, 93},
+	{RTE_IPV6(0xe540, 0x6129, 0x1cf3, 0xf9b9, 0x6123, 0x311b, 0xaf18, 0), 110, 176},
+	{RTE_IPV6(0x0649, 0x5ea0, 0xbad8, 0x5475, 0xe9a9, 0x92ea, 0, 0), 95, 68},
+	{RTE_IPV6(0xa328, 0xf251, 0xe023, 0x48c2, 0xb04e, 0xe0ae, 0x0c00, 0), 103, 247},
+	{RTE_IPV6(0x02cd, 0x2818, 0, 0, 0, 0, 0, 0), 29, 240},
+	{RTE_IPV6(0xaee1, 0xf0a0, 0xd408, 0xf643, 0x2400, 0, 0, 0), 74, 83},
+	{RTE_IPV6(0x0575, 0xb68d, 0xa6f9, 0xc400, 0, 0, 0, 0), 55, 132},
+	{RTE_IPV6(0x2e98, 0xa900, 0, 0, 0, 0, 0, 0), 24, 217},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 3, 214},
+	{RTE_IPV6(0xe9ca, 0x9fdb, 0, 0, 0, 0, 0, 0), 34, 193},
+	{RTE_IPV6(0xac36, 0x9f05, 0x0ef5, 0x6ab6, 0x0200, 0, 0, 0), 71, 61},
+	{RTE_IPV6(0xf1de, 0xfb72, 0xc800, 0, 0, 0, 0, 0), 39, 65},
+	{RTE_IPV6(0x1ff3, 0xbe04, 0xcfc6, 0xf93b, 0xa77f, 0x5d40, 0, 0), 91, 108},
+	{RTE_IPV6(0xc923, 0xded0, 0, 0, 0, 0, 0, 0), 28, 244},
+	{RTE_IPV6(0xbb69, 0x0d72, 0xeec5, 0x9117, 0xa974, 0x5b1c, 0, 0), 95, 194},
+	{RTE_IPV6(0xfbfb, 0x79a8, 0x98b2, 0x93bc, 0xe57b, 0x9af2, 0xbea5, 0xad30), 124, 82},
+	{RTE_IPV6(0x42bb, 0xbfa4, 0x1fc4, 0x28ba, 0x9473, 0x8639, 0xdefe, 0x3000), 116, 45},
+	{RTE_IPV6(0xd111, 0x6f29, 0x9a5e, 0, 0, 0, 0, 0), 48, 224},
+	{RTE_IPV6(0x28f5, 0x8000, 0, 0, 0, 0, 0, 0), 22, 17},
+	{RTE_IPV6(0x4879, 0x9753, 0xaa50, 0, 0, 0, 0, 0), 44, 133},
+	{RTE_IPV6(0xabac, 0x65ee, 0xc994, 0x1751, 0x040b, 0x4000, 0, 0), 85, 125},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 3, 42},
+	{RTE_IPV6(0x142e, 0x1b5d, 0xc3b8, 0x06a2, 0x6de1, 0x1698, 0, 0), 96, 140},
+	{RTE_IPV6(0xf37a, 0x1e50, 0, 0, 0, 0, 0, 0), 28, 91},
+	{RTE_IPV6(0x59fa, 0x5048, 0x9404, 0, 0, 0, 0, 0), 48, 92},
+	{RTE_IPV6(0xbb98, 0, 0, 0, 0, 0, 0, 0), 13, 125},
+	{RTE_IPV6(0xaca0, 0x8f72, 0x80ef, 0xae85, 0xb09a, 0x9f86, 0x0a00, 0), 106, 249},
+	{RTE_IPV6(0xfeca, 0x7170, 0xad00, 0, 0, 0, 0, 0), 40, 202},
+	{RTE_IPV6(0x5098, 0, 0, 0, 0, 0, 0, 0), 13, 107},
+	{RTE_IPV6(0xded8, 0, 0, 0, 0, 0, 0, 0), 13, 124},
+	{RTE_IPV6(0xdb8a, 0xfd0c, 0xbcc5, 0x3000, 0, 0, 0, 0), 53, 57},
+	{RTE_IPV6(0x7c29, 0xad08, 0xcac0, 0x3dfe, 0xae30, 0xef70, 0, 0), 92, 181},
+	{RTE_IPV6(0xc3ec, 0xf500, 0, 0, 0, 0, 0, 0), 24, 107},
+	{RTE_IPV6(0x5352, 0x2af4, 0x88bf, 0xc551, 0x5b9a, 0xd855, 0x1d96, 0xc616), 128, 101},
+	{RTE_IPV6(0x7000, 0, 0, 0, 0, 0, 0, 0), 4, 102},
+	{RTE_IPV6(0x2c1e, 0xdbf8, 0xd658, 0xe184, 0, 0, 0, 0), 62, 136},
+	{RTE_IPV6(0x29ab, 0xceb2, 0xc350, 0, 0, 0, 0, 0), 45, 114},
+	{RTE_IPV6(0x9f0f, 0xb400, 0, 0, 0, 0, 0, 0), 22, 215},
+	{RTE_IPV6(0x2abc, 0x25ae, 0x5628, 0x0454, 0xaed8, 0, 0, 0), 79, 249},
+	{RTE_IPV6(0xb9e3, 0x55b1, 0xdb5f, 0xfae3, 0x459a, 0x7600, 0, 0), 88, 29},
+	{RTE_IPV6(0x16b9, 0xee64, 0x1910, 0, 0, 0, 0, 0), 44, 71},
+	{RTE_IPV6(0x7a95, 0x754d, 0x58fa, 0xbbcb, 0x8816, 0x552a, 0x69ea, 0x4f08), 127, 112},
+	{RTE_IPV6(0x5d98, 0xe58c, 0, 0, 0, 0, 0, 0), 31, 72},
+	{RTE_IPV6(0x8125, 0xa5a7, 0xf118, 0x2528, 0x0280, 0, 0, 0), 73, 155},
+	{RTE_IPV6(0x1eca, 0xb103, 0xfdca, 0xa4f8, 0, 0, 0, 0), 61, 66},
+	{RTE_IPV6(0xb019, 0xdc78, 0xc2e4, 0x0a2d, 0xe18e, 0xc060, 0, 0), 91, 77},
+	{RTE_IPV6(0x6000, 0, 0, 0, 0, 0, 0, 0), 3, 109},
+	{RTE_IPV6(0x5238, 0x0ccc, 0x3d2d, 0x93f0, 0xdd00, 0, 0, 0), 72, 37},
+	{RTE_IPV6(0xf226, 0xf029, 0x8c4b, 0xfa25, 0xaf73, 0x61e0, 0, 0), 91, 56},
+	{RTE_IPV6(0xfbc0, 0x175a, 0x8738, 0xfc38, 0x4fdb, 0x50a7, 0x1600, 0), 103, 5},
+	{RTE_IPV6(0x3e80, 0x8b80, 0, 0, 0, 0, 0, 0), 25, 15},
+	{RTE_IPV6(0xd601, 0x54e8, 0, 0, 0, 0, 0, 0), 29, 183},
+	{RTE_IPV6(0xcf5a, 0xed89, 0xab8c, 0xe358, 0xfa1a, 0xc5a2, 0xa300, 0), 105, 171},
+	{RTE_IPV6(0xc497, 0xebe8, 0x72f8, 0x01cf, 0xc1b8, 0xba47, 0x9d00, 0), 112, 202},
+	{RTE_IPV6(0x9800, 0, 0, 0, 0, 0, 0, 0), 5, 136},
+	{RTE_IPV6(0x09ae, 0xd3c8, 0x7800, 0, 0, 0, 0, 0), 37, 107},
+	{RTE_IPV6(0x5996, 0x5f1c, 0xd10d, 0x7d9f, 0xfef4, 0x6e00, 0, 0), 87, 193},
+	{RTE_IPV6(0x171c, 0xca0a, 0x5a9e, 0x4000, 0, 0, 0, 0), 52, 4},
+	{RTE_IPV6(0x3019, 0xb409, 0x54ec, 0x0690, 0x1ec6, 0x2938, 0, 0), 96, 68},
+	{RTE_IPV6(0xfc00, 0, 0, 0, 0, 0, 0, 0), 7, 40},
+	{RTE_IPV6(0x14a5, 0x3982, 0xa400, 0, 0, 0, 0, 0), 39, 255},
+	{RTE_IPV6(0xa738, 0x8000, 0, 0, 0, 0, 0, 0), 18, 108},
+	{RTE_IPV6(0x5bcc, 0, 0, 0, 0, 0, 0, 0), 14, 219},
+	{RTE_IPV6(0x182e, 0x0904, 0xaa96, 0x3882, 0x7f78, 0x7668, 0xa830, 0), 108, 12},
+	{RTE_IPV6(0x9c3c, 0xf5f7, 0xbd00, 0, 0, 0, 0, 0), 40, 84},
+	{RTE_IPV6(0x9468, 0xbbae, 0x811c, 0x7fa2, 0x5cde, 0x3412, 0, 0), 96, 33},
+	{RTE_IPV6(0x26fd, 0xb699, 0xe9c2, 0x9f29, 0x5ec1, 0xfea0, 0, 0), 91, 199},
+	{RTE_IPV6(0x9c4d, 0x69eb, 0x91d8, 0, 0, 0, 0, 0), 46, 52},
+	{RTE_IPV6(0x64d3, 0xee93, 0x41de, 0x6349, 0xfc71, 0x2e71, 0x3488, 0), 113, 184},
+	{RTE_IPV6(0x0d1e, 0, 0, 0, 0, 0, 0, 0), 18, 124},
+	{RTE_IPV6(0x1df0, 0x8de6, 0x4eed, 0x1987, 0x8306, 0x414d, 0x4df8, 0), 109, 128},
+	{RTE_IPV6(0x0fc0, 0x6d1f, 0x95dd, 0, 0, 0, 0, 0), 49, 255},
+	{RTE_IPV6(0x50b9, 0xaa47, 0x293a, 0x9e6a, 0xfd07, 0x02b8, 0xad00, 0), 105, 146},
+	{RTE_IPV6(0x10e5, 0xe000, 0, 0, 0, 0, 0, 0), 24, 172},
+	{RTE_IPV6(0xa902, 0x9909, 0xa9cb, 0xf59a, 0xb800, 0, 0, 0), 70, 116},
+	{RTE_IPV6(0x9087, 0xefa4, 0x8ebb, 0x406d, 0, 0, 0, 0), 66, 189},
+	{RTE_IPV6(0xaa4e, 0xfce3, 0xf2c7, 0x82fb, 0xc800, 0, 0, 0), 70, 10},
+	{RTE_IPV6(0xe812, 0x0f7e, 0xa67e, 0x3a19, 0xd13e, 0x4c4f, 0, 0), 98, 184},
+	{RTE_IPV6(0xaa52, 0x4835, 0x8000, 0, 0, 0, 0, 0), 33, 98},
+	{RTE_IPV6(0x9864, 0x257a, 0xf240, 0, 0, 0, 0, 0), 42, 37},
+	{RTE_IPV6(0xaee7, 0xe621, 0x47e0, 0, 0, 0, 0, 0), 46, 174},
+	{RTE_IPV6(0x4ae1, 0xfc99, 0xca08, 0xa227, 0x4000, 0, 0, 0), 67, 251},
+	{RTE_IPV6(0xa7ba, 0x65bb, 0x7a00, 0, 0, 0, 0, 0), 42, 115},
+	{RTE_IPV6(0x5307, 0x157a, 0xf343, 0xab92, 0x91a0, 0xa867, 0xdf40, 0), 107, 252},
+	{RTE_IPV6(0x5384, 0xdb56, 0x5600, 0, 0, 0, 0, 0), 40, 176},
+	{RTE_IPV6(0x1671, 0x4866, 0x4910, 0xec39, 0xc57a, 0x1f00, 0, 0), 91, 155},
+	{RTE_IPV6(0xfa3b, 0x4023, 0x4870, 0x9f55, 0xc805, 0xc127, 0x98b9, 0x9410), 124, 36},
+	{RTE_IPV6(0xdc15, 0x30a4, 0xe079, 0x1145, 0x0a76, 0x6a00, 0, 0), 88, 202},
+	{RTE_IPV6(0xa000, 0, 0, 0, 0, 0, 0, 0), 5, 208},
+	{RTE_IPV6(0xf740, 0x537d, 0xc3e1, 0x324c, 0x1268, 0, 0, 0), 77, 158},
+	{RTE_IPV6(0x4e5b, 0x1fca, 0xbd19, 0x0d85, 0xdc00, 0, 0, 0), 72, 136},
+	{RTE_IPV6(0x69c5, 0x1a50, 0, 0, 0, 0, 0, 0), 28, 191},
+	{RTE_IPV6(0x0e1f, 0x9af2, 0xf1e7, 0x3797, 0xdf38, 0x86ff, 0x71ce, 0x4500), 120, 126},
+	{RTE_IPV6(0xf7c1, 0x3ab0, 0x1047, 0x1f78, 0xd568, 0xe753, 0x1a76, 0x5b87), 128, 139},
+	{RTE_IPV6(0x8820, 0xf000, 0, 0, 0, 0, 0, 0), 25, 216},
+	{RTE_IPV6(0x64ee, 0x7028, 0, 0, 0, 0, 0, 0), 29, 93},
+	{RTE_IPV6(0x5000, 0, 0, 0, 0, 0, 0, 0), 4, 196},
+	{RTE_IPV6(0xe9e0, 0xfe39, 0x21cd, 0x8cd9, 0xb548, 0, 0, 0), 81, 119},
+	{RTE_IPV6(0x6b4b, 0x419e, 0x808e, 0xbfbc, 0xbcf0, 0x94f3, 0x7400, 0), 104, 93},
+	{RTE_IPV6(0x2746, 0x7872, 0x45ed, 0x5f30, 0xe9b0, 0x5b9a, 0, 0), 96, 183},
+	{RTE_IPV6(0x0a3d, 0x2b65, 0x4066, 0xd000, 0, 0, 0, 0), 52, 207},
+	{RTE_IPV6(0x9780, 0, 0, 0, 0, 0, 0, 0), 9, 102},
+	{RTE_IPV6(0xd2f1, 0, 0, 0, 0, 0, 0, 0), 19, 36},
+	{RTE_IPV6(0x34de, 0xf91f, 0x6c89, 0xc701, 0xf2ad, 0xb890, 0, 0), 93, 41},
+	{RTE_IPV6(0x7b6f, 0x58c0, 0x4546, 0, 0, 0, 0, 0), 47, 70},
+	{RTE_IPV6(0xb452, 0xbc7d, 0x8c08, 0xc44a, 0, 0, 0, 0), 63, 218},
+	{RTE_IPV6(0x4d9e, 0x2265, 0xc466, 0x38dc, 0x2a8f, 0xb5bb, 0xf040, 0xa100), 120, 226},
+	{RTE_IPV6(0x58dc, 0xde26, 0x176c, 0x0594, 0xb96e, 0x140e, 0x433d, 0), 114, 25},
+	{RTE_IPV6(0x5a41, 0xdca5, 0xc585, 0x6e5c, 0xe413, 0x0211, 0, 0), 98, 6},
+	{RTE_IPV6(0x2320, 0, 0, 0, 0, 0, 0, 0), 11, 26},
+	{RTE_IPV6(0x677b, 0x31d1, 0xe4e5, 0x9000, 0, 0, 0, 0), 52, 149},
+	{RTE_IPV6(0x32f4, 0x3abf, 0x5f9c, 0, 0, 0, 0, 0), 46, 127},
+	{RTE_IPV6(0x8ca9, 0x4b4d, 0x4e56, 0x2810, 0, 0, 0, 0), 62, 144},
+	{RTE_IPV6(0x63b0, 0xaf53, 0x7232, 0xd600, 0, 0, 0, 0), 56, 213},
+	{RTE_IPV6(0x13d0, 0xd34c, 0x55b0, 0xf740, 0, 0, 0, 0), 58, 115},
+	{RTE_IPV6(0x991c, 0xbc71, 0xd374, 0x07b2, 0x88cd, 0x6000, 0, 0), 83, 146},
+	{RTE_IPV6(0xa0b4, 0xdc80, 0, 0, 0, 0, 0, 0), 26, 58},
+	{RTE_IPV6(0xea06, 0x7013, 0x3d4a, 0, 0, 0, 0, 0), 47, 222},
+	{RTE_IPV6(0x616e, 0x2275, 0x9594, 0, 0, 0, 0, 0), 50, 16},
+	{RTE_IPV6(0x63ad, 0x7749, 0xfa1e, 0x901e, 0x8000, 0, 0, 0), 65, 169},
+	{RTE_IPV6(0xa986, 0x6f59, 0x0900, 0, 0, 0, 0, 0), 40, 175},
+	{RTE_IPV6(0x8650, 0xe32b, 0, 0, 0, 0, 0, 0), 32, 3},
+	{RTE_IPV6(0xe7f3, 0x2350, 0x4bcf, 0x8089, 0x36aa, 0x47ee, 0, 0), 96, 2},
+	{RTE_IPV6(0xbdbe, 0x7987, 0xa000, 0, 0, 0, 0, 0), 36, 193},
+	{RTE_IPV6(0x8f9b, 0xd8c1, 0xefcd, 0xcc99, 0x8fec, 0x4517, 0xc8d3, 0), 118, 151},
+	{RTE_IPV6(0x2001, 0x73f4, 0x21db, 0x6000, 0, 0, 0, 0), 51, 182},
+	{RTE_IPV6(0xdc80, 0, 0, 0, 0, 0, 0, 0), 10, 148},
+	{RTE_IPV6(0xce57, 0x87eb, 0x7480, 0, 0, 0, 0, 0), 42, 53},
+	{RTE_IPV6(0x9880, 0, 0, 0, 0, 0, 0, 0), 11, 87},
+	{RTE_IPV6(0x3a92, 0xbce9, 0xe6ec, 0xc0d6, 0xa880, 0, 0, 0), 73, 235},
+	{RTE_IPV6(0x54dc, 0x5200, 0, 0, 0, 0, 0, 0), 23, 51},
+	{RTE_IPV6(0x6a91, 0x8e2a, 0xbaba, 0x3a01, 0x3062, 0xa583, 0x309c, 0xc000), 116, 11},
+	{RTE_IPV6(0x35db, 0x78f2, 0xa6d6, 0x5182, 0x4000, 0, 0, 0), 68, 28},
+	{RTE_IPV6(0xf078, 0x4ca3, 0x20c5, 0xb5fb, 0x62dc, 0x1de2, 0, 0), 96, 73},
+	{RTE_IPV6(0xeac5, 0x0ca0, 0, 0, 0, 0, 0, 0), 28, 216},
+	{RTE_IPV6(0xbf5e, 0, 0, 0, 0, 0, 0, 0), 16, 99},
+	{RTE_IPV6(0xc808, 0, 0, 0, 0, 0, 0, 0), 18, 35},
+	{RTE_IPV6(0x1d81, 0x2f53, 0x134b, 0x9e01, 0x1c18, 0x1a93, 0x5277, 0x8c64), 127, 195},
+	{RTE_IPV6(0xf1ae, 0x1a35, 0x9870, 0xc886, 0x54bb, 0xb1b0, 0x2a40, 0), 108, 176},
+	{RTE_IPV6(0x4dab, 0x9130, 0xc354, 0xbe24, 0x7ac7, 0x1200, 0, 0), 87, 217},
+	{RTE_IPV6(0x6968, 0x8735, 0xe276, 0xeea9, 0x09fd, 0x84a2, 0xd97b, 0xbf60), 126, 244},
+	{RTE_IPV6(0xa000, 0, 0, 0, 0, 0, 0, 0), 3, 125},
+	{RTE_IPV6(0x2955, 0x8f80, 0x5b89, 0xc000, 0, 0, 0, 0), 50, 219},
+	{RTE_IPV6(0x746e, 0xc000, 0, 0, 0, 0, 0, 0), 18, 165},
+	{RTE_IPV6(0x4bd5, 0x2c10, 0x2b9d, 0x22ab, 0x6275, 0x6d97, 0x053c, 0xe000), 117, 6},
+	{RTE_IPV6(0xe517, 0x743d, 0x508b, 0xc800, 0, 0, 0, 0), 53, 47},
+	{RTE_IPV6(0x537b, 0x4a00, 0, 0, 0, 0, 0, 0), 23, 73},
+	{RTE_IPV6(0x97f3, 0x2dd9, 0xd89e, 0, 0, 0, 0, 0), 47, 98},
+	{RTE_IPV6(0xabb8, 0x6ed3, 0xed72, 0x9000, 0, 0, 0, 0), 52, 21},
+	{RTE_IPV6(0x07f6, 0xc777, 0, 0, 0, 0, 0, 0), 32, 142},
+	{RTE_IPV6(0x672f, 0x4611, 0x1fe8, 0x2c4b, 0x919b, 0x64d8, 0, 0), 93, 34},
+	{RTE_IPV6(0x41aa, 0xa964, 0xa793, 0x8efb, 0x1440, 0, 0, 0), 74, 41},
+	{RTE_IPV6(0xeb06, 0xe5f8, 0x9789, 0x2400, 0, 0, 0, 0), 55, 80},
+	{RTE_IPV6(0x9c27, 0x6000, 0, 0, 0, 0, 0, 0), 22, 11},
+	{RTE_IPV6(0x5cbc, 0x52c0, 0x8ef9, 0xbe80, 0, 0, 0, 0), 58, 254},
+	{RTE_IPV6(0xfdda, 0xb52e, 0x8690, 0, 0, 0, 0, 0), 45, 95},
+	{RTE_IPV6(0xbd13, 0x1ff4, 0x5000, 0, 0, 0, 0, 0), 40, 8},
+	{RTE_IPV6(0x1e74, 0, 0, 0, 0, 0, 0, 0), 14, 212},
+	{RTE_IPV6(0x51e2, 0x0dad, 0x4f7b, 0xdf7c, 0x6c50, 0x53ee, 0, 0), 95, 217},
+	{RTE_IPV6(0x7ed3, 0xce52, 0x93d7, 0x0600, 0, 0, 0, 0), 57, 15},
+	{RTE_IPV6(0x2ae5, 0x87c5, 0xc4f3, 0x5eb5, 0x8522, 0x1000, 0, 0), 84, 66},
+	{RTE_IPV6(0x44d2, 0x9e40, 0, 0, 0, 0, 0, 0), 28, 122},
+	{RTE_IPV6(0xb73f, 0xdf5e, 0x5129, 0xcb14, 0xecd4, 0xdcc7, 0, 0), 97, 12},
+	{RTE_IPV6(0x8392, 0x027d, 0xae2b, 0xe714, 0xc200, 0, 0, 0), 71, 171},
+	{RTE_IPV6(0x1fb4, 0xf69e, 0x1cc0, 0xec27, 0xed37, 0x4ac3, 0xabc0, 0), 106, 42},
+	{RTE_IPV6(0xb30a, 0x4650, 0, 0, 0, 0, 0, 0), 28, 194},
+	{RTE_IPV6(0x9333, 0x55b9, 0xead1, 0xec57, 0x9311, 0x0744, 0x9420, 0), 107, 237},
+	{RTE_IPV6(0xb1b2, 0x0628, 0x2ea6, 0x57c6, 0xd6ea, 0x17e0, 0, 0), 93, 151},
+	{RTE_IPV6(0xc935, 0x2814, 0x3104, 0x268b, 0x85d9, 0xd686, 0x59c8, 0), 109, 238},
+	{RTE_IPV6(0x041a, 0xb525, 0xce81, 0xe920, 0, 0, 0, 0), 59, 128},
+	{RTE_IPV6(0x513a, 0xf800, 0, 0, 0, 0, 0, 0), 26, 227},
+	{RTE_IPV6(0x12ee, 0xfaa1, 0x39f6, 0xd076, 0x0e4c, 0x4919, 0x4116, 0x9878), 127, 138},
+	{RTE_IPV6(0x1f80, 0, 0, 0, 0, 0, 0, 0), 10, 60},
+	{RTE_IPV6(0x73c3, 0x4000, 0, 0, 0, 0, 0, 0), 18, 148},
+	{RTE_IPV6(0x7416, 0x4b21, 0x1081, 0x237c, 0x0a70, 0x1fd5, 0xb56c, 0xb12e), 128, 129},
+	{RTE_IPV6(0x75d6, 0x1450, 0x5333, 0x8000, 0, 0, 0, 0), 49, 202},
+	{RTE_IPV6(0x784b, 0x7c95, 0x787b, 0xf297, 0xb5a4, 0x8000, 0, 0), 81, 88},
+	{RTE_IPV6(0x57ee, 0xa83e, 0x58a6, 0x3468, 0xdba9, 0x5d80, 0, 0), 90, 3},
+	{RTE_IPV6(0xed2c, 0xe092, 0x3455, 0xf5c0, 0x4189, 0x255f, 0x9cb0, 0), 108, 243},
+	{RTE_IPV6(0xd6f1, 0x333f, 0x493d, 0xc1a5, 0x176c, 0, 0, 0), 80, 95},
+	{RTE_IPV6(0x57f2, 0x159d, 0x2dbc, 0x243e, 0x42f3, 0x4000, 0, 0), 87, 255},
+	{RTE_IPV6(0x0061, 0xdc40, 0, 0, 0, 0, 0, 0), 26, 48},
+	{RTE_IPV6(0xe3ce, 0xbd1f, 0xde08, 0xc000, 0, 0, 0, 0), 50, 38},
+	{RTE_IPV6(0xae1b, 0x0010, 0x0d96, 0x217a, 0x9a3b, 0xec23, 0xf8b2, 0x4000), 115, 20},
+	{RTE_IPV6(0x2714, 0x7d45, 0xfc40, 0, 0, 0, 0, 0), 43, 41},
+	{RTE_IPV6(0x8de8, 0x010c, 0x7de5, 0xa80e, 0x7d74, 0xb400, 0, 0), 92, 133},
+	{RTE_IPV6(0x5dee, 0x28e4, 0xfecb, 0xfb06, 0x3c52, 0xf3f2, 0, 0), 95, 189},
+	{RTE_IPV6(0x2c73, 0xc811, 0x92df, 0x73fd, 0x7ece, 0x985a, 0, 0), 95, 151},
+	{RTE_IPV6(0xd53a, 0xebff, 0x06a3, 0x3d0a, 0xe000, 0, 0, 0), 68, 100},
+	{RTE_IPV6(0x1956, 0x8b74, 0xbe58, 0, 0, 0, 0, 0), 49, 118},
+	{RTE_IPV6(0x7128, 0x418d, 0x4000, 0, 0, 0, 0, 0), 34, 164},
+	{RTE_IPV6(0x95cd, 0xc8ba, 0x137e, 0xd7c7, 0x5e25, 0x6420, 0x8000, 0), 98, 71},
+	{RTE_IPV6(0x2700, 0, 0, 0, 0, 0, 0, 0), 9, 251},
+	{RTE_IPV6(0x5157, 0x50ad, 0xa3a6, 0x6800, 0, 0, 0, 0), 57, 51},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 3, 185},
+	{RTE_IPV6(0x8c80, 0, 0, 0, 0, 0, 0, 0), 10, 144},
+	{RTE_IPV6(0x062a, 0x01b2, 0xfa35, 0xbab2, 0x7279, 0xc000, 0, 0), 84, 51},
+	{RTE_IPV6(0x0211, 0xea33, 0xa905, 0xdb95, 0xf5ed, 0x0400, 0, 0), 87, 32},
+	{RTE_IPV6(0x70bb, 0xad11, 0xe5ab, 0xe1aa, 0x0800, 0, 0, 0), 70, 137},
+	{RTE_IPV6(0xcb47, 0x8ced, 0x7160, 0x7b10, 0, 0, 0, 0), 60, 2},
+	{RTE_IPV6(0x638a, 0xcf02, 0xf419, 0xd362, 0, 0, 0, 0), 63, 163},
+	{RTE_IPV6(0x722a, 0x62f6, 0xfc30, 0xe976, 0x3fe2, 0x9de2, 0xc000, 0), 100, 162},
+	{RTE_IPV6(0xa140, 0, 0, 0, 0, 0, 0, 0), 10, 192},
+	{RTE_IPV6(0xe946, 0xf02d, 0xf000, 0, 0, 0, 0, 0), 36, 185},
+	{RTE_IPV6(0x1c7b, 0x1fb0, 0xebe5, 0xa9c0, 0, 0, 0, 0), 59, 51},
+	{RTE_IPV6(0x92c5, 0xf3eb, 0xf338, 0x8c00, 0, 0, 0, 0), 54, 93},
+	{RTE_IPV6(0, 0, 0, 0, 0, 0, 0, 0), 2, 159},
+	{RTE_IPV6(0x8d5c, 0x0d1b, 0x57f1, 0xab8f, 0xdc00, 0, 0, 0), 72, 189},
+	{RTE_IPV6(0xa497, 0xc000, 0, 0, 0, 0, 0, 0), 21, 248},
+	{RTE_IPV6(0x23bc, 0xf84f, 0x2797, 0xe8d7, 0xf8f5, 0xb990, 0x4e66, 0xad80), 123, 38},
+	{RTE_IPV6(0xc1e8, 0xa63c, 0x3e50, 0xe6e1, 0xa5f0, 0, 0, 0), 76, 167},
+	{RTE_IPV6(0x6de5, 0x769b, 0x2b9a, 0, 0, 0, 0, 0), 51, 28},
+	{RTE_IPV6(0xa03e, 0x3fd4, 0xda8a, 0x9a6c, 0xa37f, 0xc5ed, 0xb72c, 0x8cc0), 125, 37},
+	{RTE_IPV6(0xc425, 0x3392, 0x1a55, 0x351f, 0xd88d, 0x34da, 0x9920, 0), 107, 234},
+	{RTE_IPV6(0xe480, 0, 0, 0, 0, 0, 0, 0), 9, 70},
+	{RTE_IPV6(0x9af8, 0x14f2, 0x9af4, 0x3f11, 0x7934, 0x4654, 0x76d0, 0), 108, 50},
+	{RTE_IPV6(0x2964, 0x1b54, 0x6a70, 0x6000, 0, 0, 0, 0), 51, 171},
+	{RTE_IPV6(0x5163, 0xc58b, 0x1e96, 0xe6d8, 0x51be, 0x54a5, 0x1d40, 0x8000), 113, 236},
+	{RTE_IPV6(0x7000, 0, 0, 0, 0, 0, 0, 0), 4, 3},
+	{RTE_IPV6(0xa477, 0xfd7e, 0xa0f9, 0xb7bf, 0x776f, 0xe000, 0, 0), 86, 64},
+	{RTE_IPV6(0x8a3a, 0xc6fe, 0x00c5, 0x3c5b, 0x84c7, 0xb5fb, 0x4ea0, 0), 108, 213},
+	{RTE_IPV6(0xd159, 0xa8ec, 0x92a9, 0x6400, 0, 0, 0, 0), 54, 15},
+	{RTE_IPV6(0x83d2, 0xd000, 0, 0, 0, 0, 0, 0), 20, 145},
+	{RTE_IPV6(0xa5be, 0x9d07, 0x8305, 0x9300, 0, 0, 0, 0), 57, 27},
+	{RTE_IPV6(0xb3e2, 0x39cc, 0xbb46, 0x3451, 0x77a2, 0xe52a, 0x2fb9, 0x09a2), 127, 75},
+	{RTE_IPV6(0x62eb, 0x9b33, 0x6ba7, 0x7f89, 0xfef6, 0xa2ab, 0xb40d, 0xe900), 123, 76},
+	{RTE_IPV6(0x6b4f, 0x4c5a, 0x5e97, 0x9b1f, 0x2173, 0x13cc, 0x6273, 0), 113, 247},
+	{RTE_IPV6(0x8f2e, 0x1eaf, 0xe000, 0, 0, 0, 0, 0), 43, 121},
+	{RTE_IPV6(0x9b55, 0xd9b4, 0, 0, 0, 0, 0, 0), 30, 214},
+	{RTE_IPV6(0x3a3e, 0x9c00, 0, 0, 0, 0, 0, 0), 26, 221},
+	{RTE_IPV6(0x5c9b, 0x3503, 0x276c, 0x9bc8, 0, 0, 0, 0), 63, 102},
+	{RTE_IPV6(0x4000, 0, 0, 0, 0, 0, 0, 0), 2, 191},
+	{RTE_IPV6(0x3f86, 0xfb3b, 0xc000, 0, 0, 0, 0, 0), 39, 197},
+	{RTE_IPV6(0xea95, 0xdc6a, 0x0090, 0xd680, 0x2366, 0, 0, 0), 79, 106},
 };
 
 #define  NUM_ROUTE_ENTRIES RTE_DIM(large_route_table)
@@ -1067,19 +1068,19 @@ static inline void mask_ip6_prefix(uint8_t *ip_out,
 /* check if IPv6 address ip[] matches the rule with IPv6 address ip_rule[]
  * and depth. If matched, return 0, else return -1.
  */
-static inline int check_lpm6_rule(uint8_t *ip,
-	const uint8_t *ip_rule, uint8_t depth)
+static inline int check_lpm6_rule(const struct rte_ipv6_addr *ip,
+	const struct rte_ipv6_addr *ip_rule, uint8_t depth)
 {
 	int k;
 	uint8_t mask;
 
 	for (k = 0; k < 16; k++) {
 		if (depth >= 8) {
-			if (ip[k] != ip_rule[k])
+			if (ip->a[k] != ip_rule->a[k])
 				return -1;
 		} else if (depth > 0) {
 			mask = (uint8_t)((unsigned int)(-1) << (8 - depth));
-			if ((ip[k] & mask) == (ip_rule[k] & mask))
+			if ((ip->a[k] & mask) == (ip_rule->a[k] & mask))
 				return 0;
 			else
 				return -1;
@@ -1098,7 +1099,7 @@ static inline int check_lpm6_rule(uint8_t *ip,
  * if some item in rule[] matches, return 0,
  * else return -1;
  */
-static int get_next_hop(uint8_t *ip, uint8_t *next_hop,
+static int get_next_hop(const struct rte_ipv6_addr *ip, uint8_t *next_hop,
 	const struct rules_tbl_entry *rule, int rule_num)
 {
 	int i;
@@ -1107,7 +1108,7 @@ static int get_next_hop(uint8_t *ip, uint8_t *next_hop,
 
 	for (i = 0; i < rule_num; i++) {
 		if (rule[i].depth >= max_depth) {
-			result = check_lpm6_rule(ip, rule[i].ip, rule[i].depth);
+			result = check_lpm6_rule(ip, &rule[i].ip, rule[i].depth);
 			if (result == 0) {
 				*next_hop = rule[i].next_hop;
 				max_depth = rule[i].depth;
@@ -1131,12 +1132,12 @@ static void generate_large_ips_table(int gen_expected_next_hop)
 
 	for (i = 0; i < NUM_IPS_ENTRIES; i++) {
 		for (j = 0; j < 16; j++)
-			large_ips_table[i].ip[j] = rte_rand();
+			large_ips_table[i].ip.a[j] = rte_rand();
 	}
 
 	for (k = j = 0, i = 0; i < NUM_IPS_ENTRIES; i++) {
-		mask_ip6_prefix(large_ips_table[i].ip,
-			large_route_table[j].ip, large_route_table[j].depth);
+		mask_ip6_prefix(large_ips_table[i].ip.a,
+			large_route_table[j].ip.a, large_route_table[j].depth);
 		k++;
 		if (k == (NUM_IPS_ENTRIES / NUM_ROUTE_ENTRIES)) {
 			j++;
@@ -1150,7 +1151,7 @@ static void generate_large_ips_table(int gen_expected_next_hop)
 		return;
 
 	for (k = 0; k < NUM_IPS_ENTRIES; k++)
-		get_next_hop(large_ips_table[k].ip,
+		get_next_hop(&large_ips_table[k].ip,
 				&(large_ips_table[k].next_hop),
 				large_route_table,
 				NUM_ROUTE_ENTRIES);
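
As context for the conversions above: after this patch, every rte_lpm6 entry
point takes struct rte_ipv6_addr pointers end to end. A minimal usage sketch
(the table sizing, name and prefix below are illustrative assumptions, not
taken from this patch):

#include <stdio.h>
#include <rte_ip6.h>
#include <rte_lpm6.h>
#include <rte_memory.h>

static void lpm6_sketch(void)
{
	struct rte_lpm6_config cfg = {
		.max_rules = 1024,		/* assumed sizing */
		.number_tbl8s = 1 << 16,
		.flags = 0,
	};
	struct rte_lpm6 *lpm = rte_lpm6_create("sketch", SOCKET_ID_ANY, &cfg);
	/* 2001:db8::/32, built with the same RTE_IPV6() initializer as above */
	struct rte_ipv6_addr pfx = RTE_IPV6(0x2001, 0x0db8, 0, 0, 0, 0, 0, 0);
	uint32_t hop;

	if (lpm == NULL)
		return;
	if (rte_lpm6_add(lpm, &pfx, 32, 7) == 0 &&
	    rte_lpm6_lookup(lpm, &pfx, &hop) == 0)
		printf("next hop %u\n", hop);	/* prints 7 */
	rte_lpm6_free(lpm);
}
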
diff --git a/app/test/test_lpm6_perf.c b/app/test/test_lpm6_perf.c
index c847dcb18375..1860a99ed6f1 100644
--- a/app/test/test_lpm6_perf.c
+++ b/app/test/test_lpm6_perf.c
@@ -82,7 +82,7 @@ test_lpm6_perf(void)
 	begin = rte_rdtsc();
 
 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
-		if (rte_lpm6_add(lpm, large_route_table[i].ip,
+		if (rte_lpm6_add(lpm, &large_route_table[i].ip,
 				large_route_table[i].depth, next_hop_add) == 0)
 			status++;
 	}
@@ -101,7 +101,7 @@ test_lpm6_perf(void)
 		begin = rte_rdtsc();
 
 		for (j = 0; j < NUM_IPS_ENTRIES; j ++) {
-			if (rte_lpm6_lookup(lpm, large_ips_table[j].ip,
+			if (rte_lpm6_lookup(lpm, &large_ips_table[j].ip,
 					&next_hop_return) != 0)
 				count++;
 		}
@@ -117,11 +117,11 @@ test_lpm6_perf(void)
 	total_time = 0;
 	count = 0;
 
-	uint8_t ip_batch[NUM_IPS_ENTRIES][16];
+	struct rte_ipv6_addr ip_batch[NUM_IPS_ENTRIES];
 	int32_t next_hops[NUM_IPS_ENTRIES];
 
 	for (i = 0; i < NUM_IPS_ENTRIES; i++)
-		memcpy(ip_batch[i], large_ips_table[i].ip, 16);
+		ip_batch[i] = large_ips_table[i].ip;
 
 	for (i = 0; i < ITERATIONS; i ++) {
 
@@ -144,7 +144,7 @@ test_lpm6_perf(void)
 
 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
 		/* rte_lpm_delete(lpm, ip, depth) */
-		status += rte_lpm6_delete(lpm, large_route_table[i].ip,
+		status += rte_lpm6_delete(lpm, &large_route_table[i].ip,
 				large_route_table[i].depth);
 	}
 
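The bulk path in the hunk above changes the same way: ip_batch is now an
array of struct rte_ipv6_addr, so filling it is plain struct assignment
rather than a 16-byte memcpy() per address. A sketch, with the burst size
assumed for illustration:

#include <rte_ip6.h>
#include <rte_lpm6.h>

#define SKETCH_BURST 32	/* assumed burst size */

static void lpm6_bulk_sketch(const struct rte_lpm6 *lpm,
	const struct rte_ipv6_addr *src, unsigned int n)
{
	struct rte_ipv6_addr ips[SKETCH_BURST];
	int32_t hops[SKETCH_BURST];
	unsigned int i;

	if (n > SKETCH_BURST)
		n = SKETCH_BURST;
	for (i = 0; i < n; i++)
		ips[i] = src[i];	/* struct assignment replaces memcpy(..., 16) */
	rte_lpm6_lookup_bulk_func(lpm, ips, hops, n);	/* hops[i] < 0 on miss */
}
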
diff --git a/app/test/test_table_combined.c b/app/test/test_table_combined.c
index 1c2efe649ded..04503baa5180 100644
--- a/app/test/test_table_combined.c
+++ b/app/test/test_table_combined.c
@@ -362,7 +362,7 @@ test_table_lpm_ipv6_combined(void)
 	struct rte_table_lpm_ipv6_key lpm_ipv6_key = {
 		.depth = 16,
 	};
-	memset(lpm_ipv6_key.ip, 0xad, 16);
+	memset(&lpm_ipv6_key.ip, 0xad, 16);
 
 	struct table_packets table_packets;
 
diff --git a/app/test/test_table_tables.c b/app/test/test_table_tables.c
index 26908e6112fc..920aa555cbd2 100644
--- a/app/test/test_table_tables.c
+++ b/app/test/test_table_tables.c
@@ -525,10 +525,10 @@ test_table_lpm_ipv6(void)
 	/* Add */
 	struct rte_table_lpm_ipv6_key lpm_key;
 
-	lpm_key.ip[0] = 0xad;
-	lpm_key.ip[1] = 0xad;
-	lpm_key.ip[2] = 0xad;
-	lpm_key.ip[3] = 0xad;
+	lpm_key.ip.a[0] = 0xad;
+	lpm_key.ip.a[1] = 0xad;
+	lpm_key.ip.a[2] = 0xad;
+	lpm_key.ip.a[3] = 0xad;
 
 	table = rte_table_lpm_ipv6_ops.f_create(&lpm_params, 0, entry_size);
 	if (table == NULL)
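
Both table tests above now reach the key bytes through the embedded
struct rte_ipv6_addr, either via memset() on &key.ip or byte by byte
through key.ip.a[]. A combined sketch (the prefix bytes are arbitrary):

#include <string.h>
#include <rte_table_lpm_ipv6.h>

static void lpm_ipv6_key_sketch(struct rte_table_lpm_ipv6_key *key)
{
	memset(&key->ip, 0, sizeof(key->ip));	/* clear all 16 bytes */
	key->ip.a[0] = 0x20;	/* 2001:db8::/32, most significant bytes */
	key->ip.a[1] = 0x01;
	key->ip.a[2] = 0x0d;
	key->ip.a[3] = 0xb8;
	key->depth = 32;
}
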
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 830904203c38..0b658fce37f7 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -80,13 +80,6 @@ Deprecation Notices
     - ``struct rte_ipv6_tuple``
   ipsec
     - ``struct rte_ipsec_sadv6_key``
-  lpm
-    - ``rte_lpm6_add()``
-    - ``rte_lpm6_is_rule_present()``
-    - ``rte_lpm6_delete()``
-    - ``rte_lpm6_delete_bulk_func()``
-    - ``rte_lpm6_lookup()``
-    - ``rte_lpm6_lookup_bulk_func()``
   node
     - ``rte_node_ip6_route_add()``
   pipeline
@@ -98,8 +91,6 @@ Deprecation Notices
     - ``rte_rib6_insert()``
     - ``rte_rib6_remove()``
     - ``rte_rib6_get_ip()``
-  table
-    - ``struct rte_table_lpm_ipv6_key``
 
 * net, ethdev: The flow item ``RTE_FLOW_ITEM_TYPE_VXLAN_GPE``
   is replaced with ``RTE_FLOW_ITEM_TYPE_VXLAN``.
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index de24705ef662..452fd7518ee3 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -289,8 +289,20 @@ API Changes
 * net: A new IPv6 address structure was introduced to replace ad-hoc ``uint8_t[16]`` arrays.
   The following libraries and symbols were modified:
 
+  lpm
+    - ``rte_lpm6_add()``
+    - ``rte_lpm6_delete()``
+    - ``rte_lpm6_delete_bulk_func()``
+    - ``rte_lpm6_is_rule_present()``
+    - ``rte_lpm6_lookup()``
+    - ``rte_lpm6_lookup_bulk_func()``
+    - ``RTE_LPM6_IPV6_ADDR_SIZE`` (deprecated, replaced with ``RTE_IPV6_ADDR_SIZE``)
+    - ``RTE_LPM6_MAX_DEPTH`` (deprecated, replaced with ``RTE_IPV6_MAX_DEPTH``)
   net
     - ``struct rte_ipv6_hdr``
+  table
+    - ``struct rte_table_lpm_ipv6_key``
+    - ``RTE_LPM_IPV6_ADDR_SIZE`` (deprecated, replaced with ``RTE_IPV6_ADDR_SIZE``)
 
 
 ABI Changes
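
As a concrete illustration of the structure these notes refer to: the new
type wraps the former raw array in a named byte field, so addresses can be
assigned and passed by value (a sketch; the prefix is an arbitrary example):

#include <rte_ip6.h>

/* RTE_IPV6() builds an initializer from eight 16-bit groups in the usual
 * textual order, here 2001:db8::.
 */
static const struct rte_ipv6_addr example_pfx =
	RTE_IPV6(0x2001, 0x0db8, 0, 0, 0, 0, 0, 0);

static inline uint8_t ip6_first_byte(const struct rte_ipv6_addr *ip)
{
	return ip->a[0];	/* raw bytes remain reachable through .a[] */
}
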
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 4c0fa5054a2e..1f841028442f 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -184,21 +184,21 @@ struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
  */
 
 struct l3fwd_ipv6_route {
-	uint8_t ip[IPV6_ADDR_LEN];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint8_t if_out;
 };
 
 /* Default l3fwd_ipv6_route_array table. 8< */
 static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
-	{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
-	{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
-	{{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
-	{{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
-	{{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
-	{{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
-	{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
-	{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
+	{RTE_IPV6(0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 0},
+	{RTE_IPV6(0x0201, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 1},
+	{RTE_IPV6(0x0301, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 2},
+	{RTE_IPV6(0x0401, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 3},
+	{RTE_IPV6(0x0501, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 4},
+	{RTE_IPV6(0x0601, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 5},
+	{RTE_IPV6(0x0701, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 6},
+	{RTE_IPV6(0x0801, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 7},
 };
 /* >8 End of default l3fwd_ipv6_route_array table. */
 
@@ -311,7 +311,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
 		ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv6_hdr *);
 
 		/* Find destination port */
-		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr.a,
+		if (rte_lpm6_lookup(rxq->lpm6, &ip_hdr->dst_addr,
 						&next_hop) == 0 &&
 				(enabled_port_mask & 1 << next_hop) != 0) {
 			port_out = next_hop;
@@ -751,7 +751,7 @@ init_routing_table(void)
 			/* populate the LPM6 table */
 			for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
 				ret = rte_lpm6_add(lpm6,
-					l3fwd_ipv6_route_array[i].ip,
+					&l3fwd_ipv6_route_array[i].ip,
 					l3fwd_ipv6_route_array[i].depth,
 					l3fwd_ipv6_route_array[i].if_out);
 
@@ -764,7 +764,7 @@ init_routing_table(void)
 				RTE_LOG(INFO, IP_FRAG, "Socket %i: adding route " IPv6_BYTES_FMT
 						"/%d (port %d)\n",
 					socket,
-					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
+					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip.a),
 					l3fwd_ipv6_route_array[i].depth,
 					l3fwd_ipv6_route_array[i].if_out);
 			}
diff --git a/examples/ip_pipeline/thread.c b/examples/ip_pipeline/thread.c
index 8a912dc1b392..9d8082b73080 100644
--- a/examples/ip_pipeline/thread.c
+++ b/examples/ip_pipeline/thread.c
@@ -2563,7 +2563,7 @@ match_convert(struct table_rule_match *mh,
 			ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
 			ml->lpm_ipv4.depth = mh->match.lpm.depth;
 		} else {
-			memcpy(ml->lpm_ipv6.ip,
+			memcpy(&ml->lpm_ipv6.ip,
 				mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
 			ml->lpm_ipv6.depth = mh->match.lpm.depth;
 		}
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 4da692eb23e6..17ae76d4badb 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -205,21 +205,21 @@ struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
  */
 
 struct l3fwd_ipv6_route {
-	uint8_t ip[IPV6_ADDR_LEN];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint8_t if_out;
 };
 
 /* Default l3fwd_ipv6_route_array table. 8< */
 static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
-	{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
-	{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
-	{{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
-	{{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
-	{{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
-	{{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
-	{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
-	{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
+	{RTE_IPV6(0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 0},
+	{RTE_IPV6(0x0201, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 1},
+	{RTE_IPV6(0x0301, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 2},
+	{RTE_IPV6(0x0401, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 3},
+	{RTE_IPV6(0x0501, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 4},
+	{RTE_IPV6(0x0601, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 5},
+	{RTE_IPV6(0x0701, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 6},
+	{RTE_IPV6(0x0801, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, 0x0101), 48, 7},
 };
 /* >8 End of default l3fwd_ipv6_route_array table. */
 
@@ -400,7 +400,7 @@ reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
 		}
 
 		/* Find destination port */
-		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr.a,
+		if (rte_lpm6_lookup(rxq->lpm6, &ip_hdr->dst_addr,
 						&next_hop) == 0 &&
 				(enabled_port_mask & 1 << next_hop) != 0) {
 			dst_port = next_hop;
@@ -797,7 +797,7 @@ init_routing_table(void)
 			/* populate the LPM6 table */
 			for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
 				ret = rte_lpm6_add(lpm6,
-					l3fwd_ipv6_route_array[i].ip,
+					&l3fwd_ipv6_route_array[i].ip,
 					l3fwd_ipv6_route_array[i].depth,
 					l3fwd_ipv6_route_array[i].if_out);
 
@@ -810,7 +810,7 @@ init_routing_table(void)
 				RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv6_BYTES_FMT
 						"/%d (port %d)\n",
 					socket,
-					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
+					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip.a),
 					l3fwd_ipv6_route_array[i].depth,
 					l3fwd_ipv6_route_array[i].if_out);
 			}
diff --git a/examples/ipsec-secgw/ipsec_lpm_neon.h b/examples/ipsec-secgw/ipsec_lpm_neon.h
index 9573f53ae957..752401d9f27f 100644
--- a/examples/ipsec-secgw/ipsec_lpm_neon.h
+++ b/examples/ipsec-secgw/ipsec_lpm_neon.h
@@ -5,6 +5,8 @@
 #ifndef IPSEC_LPM_NEON_H
 #define IPSEC_LPM_NEON_H
 
+#include <rte_ip6.h>
+
 #include <arm_neon.h>
 #include "ipsec_neon.h"
 
@@ -114,7 +116,7 @@ process_single_pkt(struct rt_ctx *rt_ctx, struct rte_mbuf *pkt,
 static inline void
 route6_pkts_neon(struct rt_ctx *rt_ctx, struct rte_mbuf **pkts, int nb_rx)
 {
-	uint8_t dst_ip6[MAX_PKT_BURST][16];
+	struct rte_ipv6_addr dst_ip6[MAX_PKT_BURST];
 	uint16_t dst_port[MAX_PKT_BURST];
 	struct rte_ether_hdr *eth_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
@@ -142,8 +144,7 @@ route6_pkts_neon(struct rt_ctx *rt_ctx, struct rte_mbuf **pkts, int nb_rx)
 			 * required to get the hop
 			 */
 			ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
-			memcpy(&dst_ip6[lpm_pkts][0],
-					ipv6_hdr->dst_addr, 16);
+			dst_ip6[lpm_pkts] = ipv6_hdr->dst_addr;
 			lpm_pkts++;
 		}
 	}
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index dd14226e8140..e0690fc8d9bd 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -298,17 +298,12 @@ route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
 static inline uint16_t
 route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
 {
-	uint8_t dst_ip[16];
-	uint8_t *ip6_dst;
-	uint16_t offset;
+	struct rte_ipv6_hdr *ip;
 	uint32_t hop;
 	int ret;
 
-	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
-	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
-	memcpy(&dst_ip[0], ip6_dst, 16);
-
-	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
+	ip = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *, RTE_ETHER_HDR_LEN);
+	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, &ip->dst_addr, &hop);
 
 	if (ret == 0) {
 		/* We have a hit */
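
The route6_pkt() conversion above shows the pattern this series enables:
map the mbuf straight to an IPv6 header and hand its embedded dst_addr to
the LPM, with no intermediate uint8_t[16] copy. A standalone sketch with
assumed parameter names:

#include <rte_ether.h>
#include <rte_ip6.h>
#include <rte_lpm6.h>
#include <rte_mbuf.h>

static inline int dst_lookup_sketch(struct rte_mbuf *pkt,
	const struct rte_lpm6 *lpm, uint32_t *hop)
{
	struct rte_ipv6_hdr *ip6 = rte_pktmbuf_mtod_offset(pkt,
			struct rte_ipv6_hdr *, RTE_ETHER_HDR_LEN);

	/* dst_addr is a struct rte_ipv6_addr after this series */
	return rte_lpm6_lookup(lpm, &ip6->dst_addr, hop);
}
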
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 8f6a1c06aa7f..8f9616129362 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -560,7 +560,7 @@ static __rte_always_inline void
 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint32_t nb_pkts)
 {
 	int32_t hop[MAX_PKT_BURST * 2];
-	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+	struct rte_ipv6_addr dst_ip[MAX_PKT_BURST * 2];
 	struct rte_ether_hdr *ethhdr;
 	uint8_t *ip6_dst;
 	uint32_t pkt_hop = 0;
@@ -586,7 +586,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint32_t nb_pkts)
 			offset = offsetof(struct ip6_hdr, ip6_dst);
 			ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
 					offset);
-			memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
+			memcpy(&dst_ip[lpm_pkts], ip6_dst, 16);
 			lpm_pkts++;
 		}
 	}
diff --git a/examples/ipsec-secgw/rt.c b/examples/ipsec-secgw/rt.c
index ce854ccb6018..059fc0c8f28c 100644
--- a/examples/ipsec-secgw/rt.c
+++ b/examples/ipsec-secgw/rt.c
@@ -25,7 +25,7 @@ struct ip4_route {
 };
 
 struct ip6_route {
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint8_t if_out;
 };
@@ -99,7 +99,7 @@ parse_rt_tokens(char **tokens, uint32_t n_tokens,
 					tokens[ti]);
 				if (status->status < 0)
 					return;
-				memcpy(route_ipv6->ip, ip.s6_addr, 16);
+				memcpy(&route_ipv6->ip, ip.s6_addr, 16);
 				route_ipv6->depth = (uint8_t)depth;
 			}
 		}
@@ -183,7 +183,7 @@ rt_init(struct socket_ctx *ctx, int32_t socket_id)
 
 	/* populate the LPM table */
 	for (i = 0; i < nb_rt_ip6; i++) {
-		ret = rte_lpm6_add(lpm6, rt_ip6[i].ip, rt_ip6[i].depth,
+		ret = rte_lpm6_add(lpm6, &rt_ip6[i].ip, rt_ip6[i].depth,
 				rt_ip6[i].if_out);
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s "
@@ -191,14 +191,14 @@ rt_init(struct socket_ctx *ctx, int32_t socket_id)
 
 		printf("LPM6: Adding route "
 			" %hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx/%hhx (%hhx)\n",
-			(uint16_t)((rt_ip6[i].ip[0] << 8) | rt_ip6[i].ip[1]),
-			(uint16_t)((rt_ip6[i].ip[2] << 8) | rt_ip6[i].ip[3]),
-			(uint16_t)((rt_ip6[i].ip[4] << 8) | rt_ip6[i].ip[5]),
-			(uint16_t)((rt_ip6[i].ip[6] << 8) | rt_ip6[i].ip[7]),
-			(uint16_t)((rt_ip6[i].ip[8] << 8) | rt_ip6[i].ip[9]),
-			(uint16_t)((rt_ip6[i].ip[10] << 8) | rt_ip6[i].ip[11]),
-			(uint16_t)((rt_ip6[i].ip[12] << 8) | rt_ip6[i].ip[13]),
-			(uint16_t)((rt_ip6[i].ip[14] << 8) | rt_ip6[i].ip[15]),
+			(uint16_t)((rt_ip6[i].ip.a[0] << 8) | rt_ip6[i].ip.a[1]),
+			(uint16_t)((rt_ip6[i].ip.a[2] << 8) | rt_ip6[i].ip.a[3]),
+			(uint16_t)((rt_ip6[i].ip.a[4] << 8) | rt_ip6[i].ip.a[5]),
+			(uint16_t)((rt_ip6[i].ip.a[6] << 8) | rt_ip6[i].ip.a[7]),
+			(uint16_t)((rt_ip6[i].ip.a[8] << 8) | rt_ip6[i].ip.a[9]),
+			(uint16_t)((rt_ip6[i].ip.a[10] << 8) | rt_ip6[i].ip.a[11]),
+			(uint16_t)((rt_ip6[i].ip.a[12] << 8) | rt_ip6[i].ip.a[13]),
+			(uint16_t)((rt_ip6[i].ip.a[14] << 8) | rt_ip6[i].ip.a[15]),
 			rt_ip6[i].depth, rt_ip6[i].if_out);
 	}
 
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index a13dc011380d..9bda0ab633e1 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -151,7 +151,7 @@ struct ipv4_l3fwd_lpm_route {
 };
 
 struct ipv6_l3fwd_lpm_route {
-	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE];
+	uint8_t ip[RTE_IPV6_ADDR_SIZE];
 	uint8_t depth;
 	uint8_t if_out;
 };
@@ -1371,7 +1371,7 @@ main(int argc, char **argv)
 
 		dst_port = ipv6_l3fwd_lpm_route_array[i].if_out;
 
-		memcpy(in6.s6_addr, ipv6_l3fwd_lpm_route_array[i].ip, RTE_LPM6_IPV6_ADDR_SIZE);
+		memcpy(in6.s6_addr, ipv6_l3fwd_lpm_route_array[i].ip, RTE_IPV6_ADDR_SIZE);
 		snprintf(route_str, sizeof(route_str), "%s / %d (%d)",
 			 inet_ntop(AF_INET6, &in6, abuf, sizeof(abuf)),
 			 ipv6_l3fwd_lpm_route_array[i].depth,
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 85f862dd5b40..339cd58116a4 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -732,7 +732,7 @@ setup_fib(const int socketid)
 		rte_eth_dev_info_get(route_base_v6[i].if_out,
 				     &dev_info);
 		ret = rte_fib6_add(ipv6_l3fwd_fib_lookup_struct[socketid],
-			route_base_v6[i].ip_8,
+			route_base_v6[i].ip6.a,
 			route_base_v6[i].depth,
 			route_base_v6[i].if_out);
 
@@ -744,7 +744,7 @@ setup_fib(const int socketid)
 					i, socketid);
 		}
 
-		if (inet_ntop(AF_INET6, route_base_v6[i].ip_8,
+		if (inet_ntop(AF_INET6, route_base_v6[i].ip6.a,
 				abuf, sizeof(abuf)) != NULL) {
 			printf("FIB: Adding route %s / %d (%d) [%s]\n", abuf,
 			       route_base_v6[i].depth,
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index 422fdb70054d..fc4f5878fcd8 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -62,7 +62,7 @@ lpm_get_ipv6_dst_port(const struct rte_ipv6_hdr *ipv6_hdr,
 		      uint16_t portid,
 		      struct rte_lpm6 *ipv6_l3fwd_lookup_struct)
 {
-	const uint8_t *dst_ip = ipv6_hdr->dst_addr.a;
+	const struct rte_ipv6_addr *dst_ip = &ipv6_hdr->dst_addr;
 	uint32_t next_hop;
 
 	if (rte_lpm6_lookup(ipv6_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
@@ -122,7 +122,7 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
 
 		return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
-				ipv6_hdr->dst_addr.a, &next_hop) == 0)
+				&ipv6_hdr->dst_addr, &next_hop) == 0)
 				? next_hop : portid);
 
 	}
@@ -635,7 +635,7 @@ setup_lpm(const int socketid)
 		rte_eth_dev_info_get(route_base_v6[i].if_out,
 				     &dev_info);
 		ret = rte_lpm6_add(ipv6_l3fwd_lpm_lookup_struct[socketid],
-			route_base_v6[i].ip_8,
+			&route_base_v6[i].ip6,
 			route_base_v6[i].depth,
 			route_base_v6[i].if_out);
 
@@ -647,7 +647,7 @@ setup_lpm(const int socketid)
 		}
 
 		printf("LPM: Adding route %s / %d (%d) [%s]\n",
-		       inet_ntop(AF_INET6, route_base_v6[i].ip_8, abuf,
+		       inet_ntop(AF_INET6, &route_base_v6[i].ip6, abuf,
 				 sizeof(abuf)),
 		       route_base_v6[i].depth,
 		       route_base_v6[i].if_out, rte_dev_name(dev_info.device));
diff --git a/examples/l3fwd/l3fwd_route.h b/examples/l3fwd/l3fwd_route.h
index 467c4d285915..62263c354054 100644
--- a/examples/l3fwd/l3fwd_route.h
+++ b/examples/l3fwd/l3fwd_route.h
@@ -2,6 +2,8 @@
  * Copyright(c) 2021 Intel Corporation
  */
 
+#include <rte_ip6.h>
+
 /* Log file related character defs. */
 #define COMMENT_LEAD_CHAR	('#')
 #define ROUTE_LEAD_CHAR		('R')
@@ -29,7 +31,7 @@ struct ipv4_l3fwd_route {
 };
 
 struct ipv6_l3fwd_route {
-	uint8_t ip[16];
+	struct rte_ipv6_addr ip;
 	uint8_t depth;
 	uint8_t if_out;
 };
@@ -53,10 +55,7 @@ struct ipv6_5tuple {
 struct lpm_route_rule {
 	union {
 		uint32_t ip;
-		union {
-			uint32_t ip_32[IPV6_ADDR_U32];
-			uint8_t ip_8[IPV6_ADDR_LEN];
-		};
+		struct rte_ipv6_addr ip6;
 	};
 	uint8_t depth;
 	uint8_t if_out;
diff --git a/examples/l3fwd/lpm_route_parse.c b/examples/l3fwd/lpm_route_parse.c
index f2028d79e180..9c179dc065c0 100644
--- a/examples/l3fwd/lpm_route_parse.c
+++ b/examples/l3fwd/lpm_route_parse.c
@@ -40,7 +40,7 @@ is_bypass_line(const char *buff)
 }
 
 static int
-parse_ipv6_addr_mask(char *token, uint32_t *ipv6, uint8_t *mask)
+parse_ipv6_addr_mask(char *token, struct rte_ipv6_addr *ipv6, uint8_t *mask)
 {
 	char *sa, *sm, *sv;
 	const char *dlm =  "/";
@@ -83,7 +83,7 @@ parse_ipv4_addr_mask(char *token, uint32_t *ipv4, uint8_t *mask)
 }
 
 static int
-lpm_parse_v6_net(char *in, uint32_t *v, uint8_t *mask_len)
+lpm_parse_v6_net(char *in, struct rte_ipv6_addr *v, uint8_t *mask_len)
 {
 	int32_t rc;
 
@@ -108,7 +108,7 @@ lpm_parse_v6_rule(char *str, struct lpm_route_rule *v)
 			return -EINVAL;
 	}
 
-	rc = lpm_parse_v6_net(in[CB_FLD_DST_ADDR], v->ip_32, &v->depth);
+	rc = lpm_parse_v6_net(in[CB_FLD_DST_ADDR], &v->ip6, &v->depth);
 
 	GET_CB_FIELD(in[CB_FLD_IF_OUT], v->if_out, 0, UINT8_MAX, 0);
 
@@ -164,8 +164,7 @@ lpm_add_default_v6_rules(void)
 	route_base_v6 = calloc(route_num_v6, rule_size);
 
 	for (i = 0; i < (unsigned int)route_num_v6; i++) {
-		memcpy(route_base_v6[i].ip_8, ipv6_l3fwd_route_array[i].ip,
-			   sizeof(route_base_v6[i].ip_8));
+		route_base_v6[i].ip6 = ipv6_l3fwd_route_array[i].ip;
 		route_base_v6[i].depth = ipv6_l3fwd_route_array[i].depth;
 		route_base_v6[i].if_out = ipv6_l3fwd_route_array[i].if_out;
 	}
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 01b763e5ba11..6e2155e0052e 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -228,22 +228,22 @@ const struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
  * 2001:200:0:{0-f}::/64 = Port {0-15}
  */
 const struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
-	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 0},
-	{{32, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 1},
-	{{32, 1, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 2},
-	{{32, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 3},
-	{{32, 1, 2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 4},
-	{{32, 1, 2, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 5},
-	{{32, 1, 2, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 6},
-	{{32, 1, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 7},
-	{{32, 1, 2, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 8},
-	{{32, 1, 2, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 9},
-	{{32, 1, 2, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 10},
-	{{32, 1, 2, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 11},
-	{{32, 1, 2, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 12},
-	{{32, 1, 2, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 13},
-	{{32, 1, 2, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 14},
-	{{32, 1, 2, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 15},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x0, 0, 0, 0, 0), 64, 0},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x1, 0, 0, 0, 0), 64, 1},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x2, 0, 0, 0, 0), 64, 2},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x3, 0, 0, 0, 0), 64, 3},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x4, 0, 0, 0, 0), 64, 4},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x5, 0, 0, 0, 0), 64, 5},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x6, 0, 0, 0, 0), 64, 6},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x7, 0, 0, 0, 0), 64, 7},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x8, 0, 0, 0, 0), 64, 8},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0x9, 0, 0, 0, 0), 64, 9},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xa, 0, 0, 0, 0), 64, 10},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xb, 0, 0, 0, 0), 64, 11},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xc, 0, 0, 0, 0), 64, 12},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xd, 0, 0, 0, 0), 64, 13},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xe, 0, 0, 0, 0), 64, 14},
+	{RTE_IPV6(0x2001, 0x0200, 0, 0xf, 0, 0, 0, 0), 64, 15},
 };
 
 /*
diff --git a/lib/lpm/meson.build b/lib/lpm/meson.build
index ae30f80b69c8..fae4f79fb938 100644
--- a/lib/lpm/meson.build
+++ b/lib/lpm/meson.build
@@ -20,3 +20,4 @@ indirect_headers += files(
 )
 deps += ['hash']
 deps += ['rcu']
+deps += ['net']
diff --git a/lib/lpm/rte_lpm6.c b/lib/lpm/rte_lpm6.c
index 5bc17601ab71..8beb394c4701 100644
--- a/lib/lpm/rte_lpm6.c
+++ b/lib/lpm/rte_lpm6.c
@@ -67,14 +67,14 @@ struct rte_lpm6_tbl_entry {
 
 /** Rules tbl entry structure. */
 struct rte_lpm6_rule {
-	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
+	struct rte_ipv6_addr ip; /**< Rule IP address. */
 	uint32_t next_hop; /**< Rule next hop. */
 	uint8_t depth; /**< Rule depth. */
 };
 
 /** Rules tbl entry key. */
 struct rte_lpm6_rule_key {
-	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
+	struct rte_ipv6_addr ip; /**< Rule IP address. */
 	uint32_t depth; /**< Rule depth. */
 };
 
@@ -111,37 +111,6 @@ struct rte_lpm6 {
 			/**< LPM tbl8 table. */
 };
 
-/*
- * Takes an array of uint8_t (IPv6 address) and masks it using the depth.
- * It leaves untouched one bit per unit in the depth variable
- * and set the rest to 0.
- */
-static inline void
-ip6_mask_addr(uint8_t *ip, uint8_t depth)
-{
-	int16_t part_depth, mask;
-	int i;
-
-	part_depth = depth;
-
-	for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) {
-		if (part_depth < BYTE_SIZE && part_depth >= 0) {
-			mask = (uint16_t)(~(UINT8_MAX >> part_depth));
-			ip[i] = (uint8_t)(ip[i] & mask);
-		} else if (part_depth < 0)
-			ip[i] = 0;
-
-		part_depth -= BYTE_SIZE;
-	}
-}
-
-/* copy ipv6 address */
-static inline void
-ip6_copy_addr(uint8_t *dst, const uint8_t *src)
-{
-	rte_memcpy(dst, src, RTE_LPM6_IPV6_ADDR_SIZE);
-}
-
 /*
  * LPM6 rule hash function
  *
@@ -213,9 +182,9 @@ tbl8_available(struct rte_lpm6 *lpm)
  *	  note that ip must be already masked
  */
 static inline void
-rule_key_init(struct rte_lpm6_rule_key *key, uint8_t *ip, uint8_t depth)
+rule_key_init(struct rte_lpm6_rule_key *key, const struct rte_ipv6_addr *ip, uint8_t depth)
 {
-	ip6_copy_addr(key->ip, ip);
+	key->ip = *ip;
 	key->depth = depth;
 }
 
@@ -231,7 +200,7 @@ rebuild_lpm(struct rte_lpm6 *lpm)
 
 	while (rte_hash_iterate(lpm->rules_tbl, (void *) &rule_key,
 			(void **) &next_hop, &iter) >= 0)
-		rte_lpm6_add(lpm, rule_key->ip, rule_key->depth,
+		rte_lpm6_add(lpm, &rule_key->ip, rule_key->depth,
 			(uint32_t) next_hop);
 }
 
@@ -460,7 +429,7 @@ rule_find_with_key(struct rte_lpm6 *lpm,
 
 /* Find a rule */
 static int
-rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+rule_find(struct rte_lpm6 *lpm, struct rte_ipv6_addr *ip, uint8_t depth,
 		  uint32_t *next_hop)
 {
 	struct rte_lpm6_rule_key rule_key;
@@ -481,7 +450,7 @@ rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
  *   <0 - error
  */
 static inline int
-rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, uint32_t next_hop)
+rule_add(struct rte_lpm6 *lpm, struct rte_ipv6_addr *ip, uint8_t depth, uint32_t next_hop)
 {
 	int ret, rule_exist;
 	struct rte_lpm6_rule_key rule_key;
@@ -570,7 +539,7 @@ init_tbl8_header(struct rte_lpm6 *lpm, uint32_t tbl_ind,
  * of the bytes being inspected in this step.
  */
 static uint32_t
-get_bitshift(const uint8_t *ip, uint8_t first_byte, uint8_t bytes)
+get_bitshift(const struct rte_ipv6_addr *ip, uint8_t first_byte, uint8_t bytes)
 {
 	uint32_t entry_ind, i;
 	int8_t bitshift;
@@ -581,7 +550,7 @@ get_bitshift(const uint8_t *ip, uint8_t first_byte, uint8_t bytes)
 
 		if (bitshift < 0)
 			bitshift = 0;
-		entry_ind = entry_ind | ip[i-1] << bitshift;
+		entry_ind = entry_ind | ip->a[i-1] << bitshift;
 	}
 
 	return entry_ind;
@@ -596,7 +565,7 @@ get_bitshift(const uint8_t *ip, uint8_t first_byte, uint8_t bytes)
  */
 static inline int
 simulate_add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
-		struct rte_lpm6_tbl_entry **next_tbl, const uint8_t *ip,
+		struct rte_lpm6_tbl_entry **next_tbl, const struct rte_ipv6_addr *ip,
 		uint8_t bytes, uint8_t first_byte, uint8_t depth,
 		uint32_t *need_tbl_nb)
 {
@@ -649,7 +618,7 @@ simulate_add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
 static inline int
 add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
 		uint32_t tbl_ind, struct rte_lpm6_tbl_entry **next_tbl,
-		uint32_t *next_tbl_ind, uint8_t *ip, uint8_t bytes,
+		uint32_t *next_tbl_ind, struct rte_ipv6_addr *ip, uint8_t bytes,
 		uint8_t first_byte, uint8_t depth, uint32_t next_hop,
 		uint8_t is_new_rule)
 {
@@ -814,7 +783,7 @@ add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
  *    -ENOSPC not enough tbl8 left
  */
 static int
-simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth)
+simulate_add(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *masked_ip, uint8_t depth)
 {
 	struct rte_lpm6_tbl_entry *tbl;
 	struct rte_lpm6_tbl_entry *tbl_next = NULL;
@@ -833,7 +802,7 @@ simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth)
 	 * Inspect one by one the rest of the bytes until
 	 * the process is completed.
 	 */
-	for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && ret == 1; i++) {
+	for (i = ADD_FIRST_BYTE; i < RTE_IPV6_ADDR_SIZE && ret == 1; i++) {
 		tbl = tbl_next;
 		ret = simulate_add_step(lpm, tbl, &tbl_next, masked_ip, 1,
 			(uint8_t)(i + 1), depth, &need_tbl_nb);
@@ -851,7 +820,7 @@ simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth)
  * Add a route
  */
 int
-rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
+rte_lpm6_add(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth,
 	     uint32_t next_hop)
 {
 	struct rte_lpm6_tbl_entry *tbl;
@@ -859,24 +828,24 @@ rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
 	/* init to avoid compiler warning */
 	uint32_t tbl_next_num = 123456;
 	int status;
-	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr masked_ip;
 	int i;
 
 	/* Check user arguments. */
-	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
+	if ((lpm == NULL) || (depth < 1) || (depth > RTE_IPV6_MAX_DEPTH))
 		return -EINVAL;
 
 	/* Copy the IP and mask it to avoid modifying user's input data. */
-	ip6_copy_addr(masked_ip, ip);
-	ip6_mask_addr(masked_ip, depth);
+	masked_ip = *ip;
+	rte_ipv6_addr_mask(&masked_ip, depth);
 
 	/* Simulate adding a new route */
-	int ret = simulate_add(lpm, masked_ip, depth);
+	int ret = simulate_add(lpm, &masked_ip, depth);
 	if (ret < 0)
 		return ret;
 
 	/* Add the rule to the rule table. */
-	int is_new_rule = rule_add(lpm, masked_ip, depth, next_hop);
+	int is_new_rule = rule_add(lpm, &masked_ip, depth, next_hop);
 	/* If there is no space available for new rule return error. */
 	if (is_new_rule < 0)
 		return is_new_rule;
@@ -884,7 +853,7 @@ rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
 	/* Inspect the first three bytes through tbl24 on the first step. */
 	tbl = lpm->tbl24;
 	status = add_step(lpm, tbl, TBL24_IND, &tbl_next, &tbl_next_num,
-		masked_ip, ADD_FIRST_BYTE, 1, depth, next_hop,
+		&masked_ip, ADD_FIRST_BYTE, 1, depth, next_hop,
 		is_new_rule);
 	assert(status >= 0);
 
@@ -892,10 +861,10 @@ rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
 	 * Inspect one by one the rest of the bytes until
 	 * the process is completed.
 	 */
-	for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) {
+	for (i = ADD_FIRST_BYTE; i < RTE_IPV6_ADDR_SIZE && status == 1; i++) {
 		tbl = tbl_next;
 		status = add_step(lpm, tbl, tbl_next_num, &tbl_next,
-			&tbl_next_num, masked_ip, 1, (uint8_t)(i + 1),
+			&tbl_next_num, &masked_ip, 1, (uint8_t)(i + 1),
 			depth, next_hop, is_new_rule);
 		assert(status >= 0);
 	}
@@ -910,7 +879,7 @@ rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
  */
 static inline int
 lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
-		const struct rte_lpm6_tbl_entry **tbl_next, const uint8_t *ip,
+		const struct rte_lpm6_tbl_entry **tbl_next, const struct rte_ipv6_addr *ip,
 		uint8_t first_byte, uint32_t *next_hop)
 {
 	uint32_t tbl8_index, tbl_entry;
@@ -922,7 +891,7 @@ lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
 	if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {
 
-		tbl8_index = ip[first_byte-1] +
+		tbl8_index = ip->a[first_byte-1] +
 				((tbl_entry & RTE_LPM6_TBL8_BITMASK) *
 				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES);
 
@@ -940,7 +909,7 @@ lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
  * Looks up an IP
  */
 int
-rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip,
+rte_lpm6_lookup(const struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip,
 		uint32_t *next_hop)
 {
 	const struct rte_lpm6_tbl_entry *tbl;
@@ -954,7 +923,7 @@ rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip,
 		return -EINVAL;
 
 	first_byte = LOOKUP_FIRST_BYTE;
-	tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2];
+	tbl24_index = (ip->a[0] << BYTES2_SIZE) | (ip->a[1] << BYTE_SIZE) | ip->a[2];
 
 	/* Calculate pointer to the first entry to be inspected */
 	tbl = &lpm->tbl24[tbl24_index];
@@ -973,7 +942,7 @@ rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip,
  */
 int
 rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
-		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+		struct rte_ipv6_addr *ips,
 		int32_t *next_hops, unsigned int n)
 {
 	unsigned int i;
@@ -989,8 +958,8 @@ rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
 
 	for (i = 0; i < n; i++) {
 		first_byte = LOOKUP_FIRST_BYTE;
-		tbl24_index = (ips[i][0] << BYTES2_SIZE) |
-				(ips[i][1] << BYTE_SIZE) | ips[i][2];
+		tbl24_index = (ips[i].a[0] << BYTES2_SIZE) |
+				(ips[i].a[1] << BYTE_SIZE) | ips[i].a[2];
 
 		/* Calculate pointer to the first entry to be inspected */
 		tbl = &lpm->tbl24[tbl24_index];
@@ -999,7 +968,7 @@ rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
 			/* Continue inspecting following levels
 			 * until success or failure
 			 */
-			status = lookup_step(lpm, tbl, &tbl_next, ips[i],
+			status = lookup_step(lpm, tbl, &tbl_next, &ips[i],
 					first_byte++, &next_hop);
 			tbl = tbl_next;
 		} while (status == 1);
@@ -1017,21 +986,21 @@ rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
  * Look for a rule in the high-level rules table
  */
 int
-rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
+rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth,
 			 uint32_t *next_hop)
 {
-	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr masked_ip;
 
 	/* Check user arguments. */
 	if ((lpm == NULL) || next_hop == NULL || ip == NULL ||
-			(depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
+			(depth < 1) || (depth > RTE_IPV6_MAX_DEPTH))
 		return -EINVAL;
 
 	/* Copy the IP and mask it to avoid modifying user's input data. */
-	ip6_copy_addr(masked_ip, ip);
-	ip6_mask_addr(masked_ip, depth);
+	masked_ip = *ip;
+	rte_ipv6_addr_mask(&masked_ip, depth);
 
-	return rule_find(lpm, masked_ip, depth, next_hop);
+	return rule_find(lpm, &masked_ip, depth, next_hop);
 }
 
 /*
@@ -1042,7 +1011,7 @@ rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
  *   <0 on failure
  */
 static inline int
-rule_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
+rule_delete(struct rte_lpm6 *lpm, struct rte_ipv6_addr *ip, uint8_t depth)
 {
 	int ret;
 	struct rte_lpm6_rule_key rule_key;
@@ -1067,10 +1036,10 @@ rule_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
  */
 int
 rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
-		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths,
+		struct rte_ipv6_addr *ips, uint8_t *depths,
 		unsigned n)
 {
-	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr masked_ip;
 	unsigned i;
 
 	/* Check input arguments. */
@@ -1078,9 +1047,9 @@ rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
 		return -EINVAL;
 
 	for (i = 0; i < n; i++) {
-		ip6_copy_addr(masked_ip, ips[i]);
-		ip6_mask_addr(masked_ip, depths[i]);
-		rule_delete(lpm, masked_ip, depths[i]);
+		masked_ip = ips[i];
+		rte_ipv6_addr_mask(&masked_ip, depths[i]);
+		rule_delete(lpm, &masked_ip, depths[i]);
 	}
 
 	/*
@@ -1141,7 +1110,7 @@ depth_to_mask_1b(uint8_t depth)
  * Find a less specific rule
  */
 static int
-rule_find_less_specific(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+rule_find_less_specific(struct rte_lpm6 *lpm, struct rte_ipv6_addr *ip, uint8_t depth,
 	struct rte_lpm6_rule *rule)
 {
 	int ret;
@@ -1163,12 +1132,12 @@ rule_find_less_specific(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
 			mask = depth_to_mask_1b(mask);
 
 		rule_key.depth = depth;
-		rule_key.ip[depth >> 3] &= mask;
+		rule_key.ip.a[depth >> 3] &= mask;
 
 		ret = rule_find_with_key(lpm, &rule_key, &next_hop);
 		if (ret) {
 			rule->depth = depth;
-			ip6_copy_addr(rule->ip, rule_key.ip);
+			rule->ip = rule_key.ip;
 			rule->next_hop = next_hop;
 			return 1;
 		}
@@ -1181,13 +1150,14 @@ rule_find_less_specific(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
  * Find range of tbl8 cells occupied by a rule
  */
 static void
-rule_find_range(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
+rule_find_range(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth,
 		  struct rte_lpm6_tbl_entry **from,
 		  struct rte_lpm6_tbl_entry **to,
 		  uint32_t *out_tbl_ind)
 {
 	uint32_t ind;
-	uint32_t first_3bytes = (uint32_t)ip[0] << 16 | ip[1] << 8 | ip[2];
+	uint32_t first_3bytes = (uint32_t)ip->a[0] << 16 |
+			ip->a[1] << 8 | ip->a[2];
 
 	if (depth <= 24) {
 		/* rule is within the top level */
@@ -1213,7 +1183,7 @@ rule_find_range(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
 		 * until we reach the last one
 		 */
 		while (depth > 8) {
-			tbl += ip[byte];
+			tbl += ip->a[byte];
 			assert(tbl->ext_entry == 1);
 			/* go to the next level/tbl8 */
 			tbl_ind = tbl->lpm6_tbl8_gindex;
@@ -1224,7 +1194,7 @@ rule_find_range(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
 		}
 
 		/* last level/tbl8 */
-		ind = ip[byte] & depth_to_mask_1b(depth);
+		ind = ip->a[byte] & depth_to_mask_1b(depth);
 		*from = &tbl[ind];
 		ind += (1 << (8 - depth)) - 1;
 		*to = &tbl[ind];
@@ -1288,9 +1258,9 @@ remove_tbl(struct rte_lpm6 *lpm, struct rte_lpm_tbl8_hdr *tbl_hdr,
  * Deletes a rule
  */
 int
-rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth)
+rte_lpm6_delete(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth)
 {
-	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr masked_ip;
 	struct rte_lpm6_rule lsp_rule_obj;
 	struct rte_lpm6_rule *lsp_rule;
 	int ret;
@@ -1298,25 +1268,25 @@ rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth)
 	struct rte_lpm6_tbl_entry *from, *to;
 
 	/* Check input arguments. */
-	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
+	if ((lpm == NULL) || (depth < 1) || (depth > RTE_IPV6_MAX_DEPTH))
 		return -EINVAL;
 
 	/* Copy the IP and mask it to avoid modifying user's input data. */
-	ip6_copy_addr(masked_ip, ip);
-	ip6_mask_addr(masked_ip, depth);
+	masked_ip = *ip;
+	rte_ipv6_addr_mask(&masked_ip, depth);
 
 	/* Delete the rule from the rule table. */
-	ret = rule_delete(lpm, masked_ip, depth);
+	ret = rule_delete(lpm, &masked_ip, depth);
 	if (ret < 0)
 		return -ENOENT;
 
 	/* find rule cells */
-	rule_find_range(lpm, masked_ip, depth, &from, &to, &tbl_ind);
+	rule_find_range(lpm, &masked_ip, depth, &from, &to, &tbl_ind);
 
 	/* find a less specific rule (a rule with smaller depth)
 	 * note: masked_ip will be modified, don't use it anymore
 	 */
-	ret = rule_find_less_specific(lpm, masked_ip, depth,
+	ret = rule_find_less_specific(lpm, &masked_ip, depth,
 			&lsp_rule_obj);
 	lsp_rule = ret ? &lsp_rule_obj : NULL;
 
diff --git a/lib/lpm/rte_lpm6.h b/lib/lpm/rte_lpm6.h
index c93683e6240c..d7ebe9245c8e 100644
--- a/lib/lpm/rte_lpm6.h
+++ b/lib/lpm/rte_lpm6.h
@@ -9,6 +9,9 @@
  * RTE Longest Prefix Match for IPv6 (LPM6)
  */
 
+#include <rte_common.h>
+#include <rte_ip6.h>
+
 #include <stdint.h>
 
 #ifdef __cplusplus
@@ -16,8 +19,8 @@ extern "C" {
 #endif
 
 
-#define RTE_LPM6_MAX_DEPTH               128
-#define RTE_LPM6_IPV6_ADDR_SIZE           16
+#define RTE_LPM6_MAX_DEPTH (RTE_DEPRECATED(RTE_LPM6_MAX_DEPTH) RTE_IPV6_MAX_DEPTH)
+#define RTE_LPM6_IPV6_ADDR_SIZE (RTE_DEPRECATED(RTE_LPM6_IPV6_ADDR_SIZE) RTE_IPV6_ADDR_SIZE)
 /** Max number of characters in LPM name. */
 #define RTE_LPM6_NAMESIZE                 32
 
@@ -92,7 +95,7 @@ rte_lpm6_free(struct rte_lpm6 *lpm);
  *   0 on success, negative value otherwise
  */
 int
-rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
+rte_lpm6_add(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth,
 	     uint32_t next_hop);
 
 /**
@@ -111,7 +114,7 @@ rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
  *   1 if the rule exists, 0 if it does not, a negative value on failure
  */
 int
-rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
+rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth,
 			 uint32_t *next_hop);
 
 /**
@@ -127,7 +130,7 @@ rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
  *   0 on success, negative value otherwise
  */
 int
-rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth);
+rte_lpm6_delete(struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint8_t depth);
 
 /**
  * Delete a rule from the LPM table.
@@ -145,7 +148,7 @@ rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth);
  */
 int
 rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
-		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths, unsigned n);
+		struct rte_ipv6_addr *ips, uint8_t *depths, unsigned int n);
 
 /**
  * Delete all rules from the LPM table.
@@ -169,7 +172,7 @@ rte_lpm6_delete_all(struct rte_lpm6 *lpm);
  *   -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
  */
 int
-rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip, uint32_t *next_hop);
+rte_lpm6_lookup(const struct rte_lpm6 *lpm, const struct rte_ipv6_addr *ip, uint32_t *next_hop);
 
 /**
  * Lookup multiple IP addresses in an LPM table.
@@ -189,7 +192,7 @@ rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip, uint32_t *next_ho
  */
 int
 rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
-		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+		struct rte_ipv6_addr *ips,
 		int32_t *next_hops, unsigned int n);
 
 #ifdef __cplusplus
diff --git a/lib/node/ip6_lookup.c b/lib/node/ip6_lookup.c
index 6bbcf14e2aa8..faaea5085938 100644
--- a/lib/node/ip6_lookup.c
+++ b/lib/node/ip6_lookup.c
@@ -74,7 +74,7 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
 	/* Get stream for the speculated next node */
 	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
 	while (n_left_from >= 4) {
-		uint8_t ip_batch[4][16];
+		struct rte_ipv6_addr ip_batch[4];
 		int32_t next_hop[4];
 		uint16_t next[4];
 
@@ -112,28 +112,28 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf0, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[0], &ipv6_hdr->dst_addr, 16);
+		ip_batch[0] = ipv6_hdr->dst_addr;
 
 		/* Extract DIP of mbuf1 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf1, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf1, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[1], &ipv6_hdr->dst_addr, 16);
+		ip_batch[1] = ipv6_hdr->dst_addr;
 
 		/* Extract DIP of mbuf2 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf2, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf2, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[2], &ipv6_hdr->dst_addr, 16);
+		ip_batch[2] = ipv6_hdr->dst_addr;
 
 		/* Extract DIP of mbuf3 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf3, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf3, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[3], &ipv6_hdr->dst_addr, 16);
+		ip_batch[3] = ipv6_hdr->dst_addr;
 
 		rte_lpm6_lookup_bulk_func(lpm6, ip_batch, next_hop, 4);
 
@@ -223,7 +223,7 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
 		/* Extract TTL as IPv6 hdr is in cache */
 		node_mbuf_priv1(mbuf0, dyn)->ttl = ipv6_hdr->hop_limits;
 
-		rc = rte_lpm6_lookup(lpm6, ipv6_hdr->dst_addr.a, &next_hop);
+		rc = rte_lpm6_lookup(lpm6, &ipv6_hdr->dst_addr, &next_hop);
 		next_hop = (rc == 0) ? next_hop : drop_nh;
 
 		node_mbuf_priv1(mbuf0, dyn)->nh = (uint16_t)next_hop;
@@ -267,7 +267,7 @@ rte_node_ip6_route_add(const uint8_t *ip, uint8_t depth, uint16_t next_hop,
 	uint32_t val;
 	int ret;
 
-	memcpy(in6.s6_addr, ip, RTE_LPM6_IPV6_ADDR_SIZE);
+	memcpy(in6.s6_addr, ip, RTE_IPV6_ADDR_SIZE);
 	inet_ntop(AF_INET6, &in6, abuf, sizeof(abuf));
 	/* Embedded next node id into 24 bit next hop */
 	val = ((next_node << 16) | next_hop) & ((1ull << 24) - 1);
@@ -278,8 +278,8 @@ rte_node_ip6_route_add(const uint8_t *ip, uint8_t depth, uint16_t next_hop,
 		if (!ip6_lookup_nm.lpm_tbl[socket])
 			continue;
 
-		ret = rte_lpm6_add(ip6_lookup_nm.lpm_tbl[socket], ip, depth,
-				   val);
+		ret = rte_lpm6_add(ip6_lookup_nm.lpm_tbl[socket],
+				   (const struct rte_ipv6_addr *)ip, depth, val);
 		if (ret < 0) {
 			node_err("ip6_lookup",
 				 "Unable to add entry %s / %d nh (%x) to LPM "
diff --git a/lib/table/rte_table_lpm_ipv6.c b/lib/table/rte_table_lpm_ipv6.c
index c1a7412f92cf..dea11130d3d5 100644
--- a/lib/table/rte_table_lpm_ipv6.c
+++ b/lib/table/rte_table_lpm_ipv6.c
@@ -207,7 +207,7 @@ rte_table_lpm_ipv6_entry_add(
 	}
 
 	/* Check if rule is already present in the table */
-	status = rte_lpm6_is_rule_present(lpm->lpm, ip_prefix->ip,
+	status = rte_lpm6_is_rule_present(lpm->lpm, &ip_prefix->ip,
 		ip_prefix->depth, &nht_pos0);
 	nht_pos0_valid = status > 0;
 
@@ -225,7 +225,7 @@ rte_table_lpm_ipv6_entry_add(
 	}
 
 	/* Add rule to low level LPM table */
-	if (rte_lpm6_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth,
+	if (rte_lpm6_add(lpm->lpm, &ip_prefix->ip, ip_prefix->depth,
 		nht_pos) < 0) {
 		TABLE_LOG(ERR, "%s: LPM IPv6 rule add failed", __func__);
 		return -1;
@@ -270,7 +270,7 @@ rte_table_lpm_ipv6_entry_delete(
 	}
 
 	/* Return if rule is not present in the table */
-	status = rte_lpm6_is_rule_present(lpm->lpm, ip_prefix->ip,
+	status = rte_lpm6_is_rule_present(lpm->lpm, &ip_prefix->ip,
 		ip_prefix->depth, &nht_pos);
 	if (status < 0) {
 		TABLE_LOG(ERR, "%s: LPM IPv6 algorithmic error",
@@ -283,7 +283,7 @@ rte_table_lpm_ipv6_entry_delete(
 	}
 
 	/* Delete rule from the low-level LPM table */
-	status = rte_lpm6_delete(lpm->lpm, ip_prefix->ip, ip_prefix->depth);
+	status = rte_lpm6_delete(lpm->lpm, &ip_prefix->ip, ip_prefix->depth);
 	if (status) {
 		TABLE_LOG(ERR, "%s: LPM IPv6 rule delete failed",
 			__func__);
@@ -323,11 +323,11 @@ rte_table_lpm_ipv6_lookup(
 
 		if (pkt_mask & pkts_mask) {
 			struct rte_mbuf *pkt = pkts[i];
-			uint8_t *ip = RTE_MBUF_METADATA_UINT8_PTR(pkt,
-				lpm->offset);
+			const struct rte_ipv6_addr *ip;
 			int status;
 			uint32_t nht_pos;
 
+			ip = (struct rte_ipv6_addr *)RTE_MBUF_METADATA_UINT8_PTR(pkt, lpm->offset);
 			status = rte_lpm6_lookup(lpm->lpm, ip, &nht_pos);
 			if (status == 0) {
 				pkts_out_mask |= pkt_mask;
diff --git a/lib/table/rte_table_lpm_ipv6.h b/lib/table/rte_table_lpm_ipv6.h
index 166a5ba9ee67..3ea888360635 100644
--- a/lib/table/rte_table_lpm_ipv6.h
+++ b/lib/table/rte_table_lpm_ipv6.h
@@ -39,13 +39,16 @@
 
 #include <stdint.h>
 
+#include <rte_common.h>
+#include <rte_ip6.h>
+
 #include "rte_table.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#define RTE_LPM_IPV6_ADDR_SIZE 16
+#define RTE_LPM_IPV6_ADDR_SIZE (RTE_DEPRECATED(RTE_LPM_IPV6_ADDR_SIZE) RTE_IPV6_ADDR_SIZE)
 
 /** LPM table parameters */
 struct rte_table_lpm_ipv6_params {
@@ -73,7 +76,7 @@ each rule covering for a multitude of lookup keys (destination IP addresses)
 that share the same data (next hop). */
 struct rte_table_lpm_ipv6_key {
 	/** IP address */
-	uint8_t ip[RTE_LPM_IPV6_ADDR_SIZE];
+	struct rte_ipv6_addr ip;
 
 	/** IP address depth. The most significant "depth" bits of the IP
 	address specify the network part of the IP address, while the rest of
-- 
2.47.0
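
To see the resulting rte_lpm6 API from a caller's point of view, here is
a minimal sketch (the prefix, depth and next-hop values are invented for
illustration; only the prototypes and the RTE_IPV6() helper come from
this series). Addresses are now passed as pointers to struct
rte_ipv6_addr instead of bare uint8_t arrays:

#include <rte_ip6.h>
#include <rte_lpm6.h>

/* Illustrative only: install 2001:db8::/32 -> next hop 7, then look up
 * an address inside that prefix. */
static int
lpm6_add_and_lookup(struct rte_lpm6 *lpm)
{
	struct rte_ipv6_addr prefix = RTE_IPV6(0x2001, 0x0db8, 0, 0, 0, 0, 0, 0);
	uint32_t next_hop;
	int ret;

	ret = rte_lpm6_add(lpm, &prefix, 32, 7);	/* was: const uint8_t *ip */
	if (ret < 0)
		return ret;

	return rte_lpm6_lookup(lpm, &prefix, &next_hop);
}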


^ permalink raw reply	[relevance 1%]

* [PATCH dpdk v5 04/17] net: use IPv6 structure for packet headers
  @ 2024-10-18 14:05  1%   ` Robin Jarry
  2024-10-18 14:05  1%   ` [PATCH dpdk v5 05/17] lpm6: use IPv6 address structure and utils Robin Jarry
  2024-10-18 14:05  2%   ` [PATCH dpdk v5 07/17] rib6: " Robin Jarry
  2 siblings, 0 replies; 169+ results
From: Robin Jarry @ 2024-10-18 14:05 UTC (permalink / raw)
  To: dev, Wisam Jaddo, Cristian Dumitrescu, Konstantin Ananyev,
	Yipeng Wang, Sameh Gobriel, Bruce Richardson, Vladimir Medvedkin,
	Ajit Khaparde, Somnath Kotur, Chas Williams, Min Hu (Connor),
	Potnuri Bharat Teja, Hemant Agrawal, Sachin Saxena, Ziyang Xuan,
	Xiaoyun Wang, Jie Hai, Yisen Zhuang, Jingjing Wu,
	Dariusz Sosnowski, Viacheslav Ovsiienko, Bing Zhao, Ori Kam,
	Suanming Mou, Matan Azrad, Liron Himi, Chaoyong He,
	Devendra Singh Rawat, Alok Prasad, Andrew Rybchenko,
	Stephen Hemminger, Jiawen Wu, Jian Wang, Radu Nicolau,
	Akhil Goyal, Thomas Monjalon, Ferruh Yigit, Nithin Dabilpuram,
	Pavan Nikhilesh

The rte_ipv6_hdr structure uses ad-hoc uint8_t[16] arrays to represent
addresses. Replace these arrays with the newly added rte_ipv6_addr
structure and adapt all code accordingly.

Signed-off-by: Robin Jarry <rjarry@redhat.com>
---
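As a sketch of the conversion pattern applied throughout this patch (the
helper and variable names below are illustrative, not taken from the
tree), the address fields of struct rte_ipv6_hdr become plain struct
members: byte-array memcpy() calls turn into assignments, and byte-wise
access remains available through the .a member of struct rte_ipv6_addr:

#include <rte_ip6.h>

static inline void
ipv6_hdr_set_addrs(struct rte_ipv6_hdr *hdr,
		   const struct rte_ipv6_addr *src,
		   const struct rte_ipv6_addr *dst)
{
	hdr->src_addr = *src;	/* was: memcpy(hdr->src_addr, src, 16) */
	hdr->dst_addr = *dst;	/* was: memcpy(hdr->dst_addr, dst, 16) */
	hdr->dst_addr.a[15] |= 1;	/* raw bytes still reachable via .a */
}
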
 app/test-flow-perf/items_gen.c           |  4 +--
 app/test-pipeline/pipeline_hash.c        |  4 +--
 app/test/packet_burst_generator.c        |  4 +--
 app/test/test_ipfrag.c                   |  4 +--
 app/test/test_reassembly_perf.c          | 23 +++++++-------
 app/test/test_thash.c                    |  8 ++---
 doc/guides/rel_notes/deprecation.rst     |  2 --
 doc/guides/rel_notes/release_24_11.rst   |  6 ++++
 drivers/net/bnxt/bnxt_flow.c             | 12 ++++----
 drivers/net/bonding/rte_eth_bond_pmd.c   |  6 ++--
 drivers/net/cxgbe/cxgbe_flow.c           | 14 ++++-----
 drivers/net/dpaa2/dpaa2_flow.c           | 22 +++++++-------
 drivers/net/hinic/hinic_pmd_flow.c       |  6 ++--
 drivers/net/hinic/hinic_pmd_tx.c         |  2 +-
 drivers/net/hns3/hns3_flow.c             |  8 ++---
 drivers/net/i40e/i40e_flow.c             | 12 ++++----
 drivers/net/iavf/iavf_fdir.c             |  8 ++---
 drivers/net/iavf/iavf_fsub.c             |  8 ++---
 drivers/net/iavf/iavf_ipsec_crypto.c     |  6 ++--
 drivers/net/ice/ice_fdir_filter.c        | 12 ++++----
 drivers/net/ice/ice_switch_filter.c      | 16 +++++-----
 drivers/net/igc/igc_flow.c               |  4 +--
 drivers/net/ixgbe/ixgbe_flow.c           | 12 ++++----
 drivers/net/ixgbe/ixgbe_ipsec.c          |  4 +--
 drivers/net/mlx5/hws/mlx5dr_definer.c    | 36 +++++++++++-----------
 drivers/net/mlx5/mlx5_flow.c             |  6 ++--
 drivers/net/mlx5/mlx5_flow_dv.c          | 16 ++++------
 drivers/net/mlx5/mlx5_flow_hw.c          | 10 +++----
 drivers/net/mlx5/mlx5_flow_verbs.c       |  8 ++---
 drivers/net/mvpp2/mrvl_flow.c            | 16 ++++------
 drivers/net/nfp/flower/nfp_flower_flow.c | 32 ++++++++++----------
 drivers/net/nfp/nfp_net_flow.c           | 38 +++++++++++-------------
 drivers/net/qede/qede_filter.c           |  4 +--
 drivers/net/sfc/sfc_flow.c               | 22 +++++---------
 drivers/net/tap/tap_flow.c               | 10 +++----
 drivers/net/txgbe/txgbe_flow.c           | 12 ++++----
 drivers/net/txgbe/txgbe_ipsec.c          |  4 +--
 examples/ip_fragmentation/main.c         |  2 +-
 examples/ip_pipeline/pipeline.c          | 16 +++++-----
 examples/ip_reassembly/main.c            |  2 +-
 examples/ipsec-secgw/flow.c              | 33 ++++----------------
 examples/ipsec-secgw/ipsec.c             |  8 ++---
 examples/ipsec-secgw/sa.c                |  4 +--
 examples/ipsec-secgw/sad.h               | 10 ++++---
 examples/l3fwd/l3fwd_fib.c               |  2 +-
 examples/l3fwd/l3fwd_lpm.c               |  4 +--
 lib/ethdev/rte_flow.h                    |  6 ++--
 lib/hash/rte_thash.h                     | 12 ++++----
 lib/ip_frag/rte_ipv6_reassembly.c        |  4 +--
 lib/net/rte_ip6.h                        |  6 ++--
 lib/node/ip6_lookup.c                    | 10 +++----
 lib/pipeline/rte_swx_ipsec.c             |  6 ++--
 lib/pipeline/rte_table_action.c          | 24 +++++++--------
 53 files changed, 260 insertions(+), 310 deletions(-)

diff --git a/app/test-flow-perf/items_gen.c b/app/test-flow-perf/items_gen.c
index 4ae72509d445..c740e1838ffb 100644
--- a/app/test-flow-perf/items_gen.c
+++ b/app/test-flow-perf/items_gen.c
@@ -78,8 +78,8 @@ add_ipv6(struct rte_flow_item *items,
 	for (i = 0; i < 16; i++) {
 		/* Currently src_ip is limited to 32 bit */
 		if (i < 4)
-			ipv6_specs[ti].hdr.src_addr[15 - i] = para.src_ip >> (i * 8);
-		ipv6_masks[ti].hdr.src_addr[15 - i] = 0xff;
+			ipv6_specs[ti].hdr.src_addr.a[15 - i] = para.src_ip >> (i * 8);
+		ipv6_masks[ti].hdr.src_addr.a[15 - i] = 0xff;
 	}
 
 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
diff --git a/app/test-pipeline/pipeline_hash.c b/app/test-pipeline/pipeline_hash.c
index cab9c2098014..194e5c5dcc53 100644
--- a/app/test-pipeline/pipeline_hash.c
+++ b/app/test-pipeline/pipeline_hash.c
@@ -432,7 +432,6 @@ app_main_loop_rx_metadata(void) {
 				struct rte_ipv4_hdr *ip_hdr;
 				struct rte_ipv6_hdr *ipv6_hdr;
 				uint32_t ip_dst;
-				uint8_t *ipv6_dst;
 				uint32_t *signature, *k32;
 
 				m = app.mbuf_rx.array[j];
@@ -452,9 +451,8 @@ app_main_loop_rx_metadata(void) {
 				} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
 					ipv6_hdr = (struct rte_ipv6_hdr *)
 						&m_data[sizeof(struct rte_ether_hdr)];
-					ipv6_dst = ipv6_hdr->dst_addr;
 
-					memcpy(key, ipv6_dst, 16);
+					memcpy(key, &ipv6_hdr->dst_addr, 16);
 				} else
 					continue;
 
diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 867a88da0055..c9ff5257f070 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -148,8 +148,8 @@ initialize_ipv6_header(struct rte_ipv6_hdr *ip_hdr, uint8_t *src_addr,
 	ip_hdr->proto = IPPROTO_UDP;
 	ip_hdr->hop_limits = IP_DEFTTL;
 
-	rte_memcpy(ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
-	rte_memcpy(ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));
+	rte_memcpy(&ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
+	rte_memcpy(&ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));
 
 	return (uint16_t) (pkt_data_len + sizeof(struct rte_ipv6_hdr));
 }
diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
index 8e4df220a214..18d672715729 100644
--- a/app/test/test_ipfrag.c
+++ b/app/test/test_ipfrag.c
@@ -238,8 +238,8 @@ v6_allocate_packet_of(struct rte_mbuf *b, int fill, size_t s, uint8_t ttl,
 	hdr->proto = proto;
 	hdr->hop_limits = ttl;
 
-	memset(hdr->src_addr, 0x08, sizeof(hdr->src_addr));
-	memset(hdr->dst_addr, 0x04, sizeof(hdr->src_addr));
+	memset(&hdr->src_addr, 0x08, sizeof(hdr->src_addr));
+	memset(&hdr->dst_addr, 0x04, sizeof(hdr->src_addr));
 }
 
 static inline void
diff --git a/app/test/test_reassembly_perf.c b/app/test/test_reassembly_perf.c
index 3912179022fc..15db19add917 100644
--- a/app/test/test_reassembly_perf.c
+++ b/app/test/test_reassembly_perf.c
@@ -8,6 +8,7 @@
 #include <rte_ether.h>
 #include <rte_hexdump.h>
 #include <rte_ip.h>
+#include <rte_ip6.h>
 #include <rte_ip_frag.h>
 #include <rte_mbuf.h>
 #include <rte_mbuf_pool_ops.h>
@@ -36,7 +37,7 @@
 #define IP_DST_ADDR(x) ((198U << 24) | (18 << 16) | (1 << 15) | (x))
 
 /* 2001:0200::/48 is IANA reserved range for IPv6 benchmarking (RFC5180) */
-static uint8_t ip6_addr[16] = {32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static struct rte_ipv6_addr ip6_addr = RTE_IPV6(0x2001, 0x0200, 0, 0, 0, 0, 0, 0);
 #define IP6_VERSION 6
 
 #define IP_DEFTTL 64 /* from RFC 1340. */
@@ -340,17 +341,17 @@ ipv6_frag_fill_data(struct rte_mbuf **mbuf, uint8_t nb_frags, uint32_t flow_id,
 			rte_cpu_to_be_16(pkt_len - sizeof(struct rte_ipv6_hdr));
 		ip_hdr->proto = IPPROTO_FRAGMENT;
 		ip_hdr->hop_limits = IP_DEFTTL;
-		memcpy(ip_hdr->src_addr, ip6_addr, sizeof(ip_hdr->src_addr));
-		memcpy(ip_hdr->dst_addr, ip6_addr, sizeof(ip_hdr->dst_addr));
-		ip_hdr->src_addr[7] = (flow_id >> 16) & 0xf;
-		ip_hdr->src_addr[7] |= 0x10;
-		ip_hdr->src_addr[8] = (flow_id >> 8) & 0xff;
-		ip_hdr->src_addr[9] = flow_id & 0xff;
+		ip_hdr->src_addr = ip6_addr;
+		ip_hdr->dst_addr = ip6_addr;
+		ip_hdr->src_addr.a[7] = (flow_id >> 16) & 0xf;
+		ip_hdr->src_addr.a[7] |= 0x10;
+		ip_hdr->src_addr.a[8] = (flow_id >> 8) & 0xff;
+		ip_hdr->src_addr.a[9] = flow_id & 0xff;
 
-		ip_hdr->dst_addr[7] = (flow_id >> 16) & 0xf;
-		ip_hdr->dst_addr[7] |= 0x20;
-		ip_hdr->dst_addr[8] = (flow_id >> 8) & 0xff;
-		ip_hdr->dst_addr[9] = flow_id & 0xff;
+		ip_hdr->dst_addr.a[7] = (flow_id >> 16) & 0xf;
+		ip_hdr->dst_addr.a[7] |= 0x20;
+		ip_hdr->dst_addr.a[8] = (flow_id >> 8) & 0xff;
+		ip_hdr->dst_addr.a[9] = flow_id & 0xff;
 
 		frag_hdr->next_header = IPPROTO_UDP;
 		frag_hdr->reserved = 0;
diff --git a/app/test/test_thash.c b/app/test/test_thash.c
index 65d42fd90085..952da6a52954 100644
--- a/app/test/test_thash.c
+++ b/app/test/test_thash.c
@@ -145,10 +145,10 @@ test_toeplitz_hash_calc(void)
 	}
 	for (i = 0; i < RTE_DIM(v6_tbl); i++) {
 		/*Fill ipv6 hdr*/
-		for (j = 0; j < RTE_DIM(ipv6_hdr.src_addr); j++)
-			ipv6_hdr.src_addr[j] = v6_tbl[i].src_ip[j];
-		for (j = 0; j < RTE_DIM(ipv6_hdr.dst_addr); j++)
-			ipv6_hdr.dst_addr[j] = v6_tbl[i].dst_ip[j];
+		for (j = 0; j < RTE_DIM(ipv6_hdr.src_addr.a); j++)
+			ipv6_hdr.src_addr.a[j] = v6_tbl[i].src_ip[j];
+		for (j = 0; j < RTE_DIM(ipv6_hdr.dst_addr.a); j++)
+			ipv6_hdr.dst_addr.a[j] = v6_tbl[i].dst_ip[j];
 		/*Load and convert ipv6 address into tuple*/
 		rte_thash_load_v6_addrs(&ipv6_hdr, &tuple);
 		tuple.v6.sport = v6_tbl[i].src_port;
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 20fcfedb7b89..830904203c38 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -87,8 +87,6 @@ Deprecation Notices
     - ``rte_lpm6_delete_bulk_func()``
     - ``rte_lpm6_lookup()``
     - ``rte_lpm6_lookup_bulk_func()``
-  net
-    - ``struct rte_ipv6_hdr``
   node
     - ``rte_node_ip6_route_add()``
   pipeline
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index e68676caf029..de24705ef662 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -286,6 +286,12 @@ API Changes
 * drivers/net/ena: Removed ``enable_llq``, ``normal_llq_hdr`` and ``large_llq_hdr`` devargs
   and replaced it with a new shared devarg ``llq_policy`` that keeps the same logic.
 
+* net: A new IPv6 address structure was introduced to replace ad-hoc ``uint8_t[16]`` arrays.
+  The following libraries and symbols were modified:
+
+  net
+    - ``struct rte_ipv6_hdr``
+
 
 ABI Changes
 -----------
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 03413e912149..c41403c753cf 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -424,22 +424,22 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
 					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
 
 			rte_memcpy(filter->src_ipaddr,
-				   ipv6_spec->hdr.src_addr, 16);
+				   &ipv6_spec->hdr.src_addr, 16);
 			rte_memcpy(filter->dst_ipaddr,
-				   ipv6_spec->hdr.dst_addr, 16);
+				   &ipv6_spec->hdr.dst_addr, 16);
 
-			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
+			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr.a,
 						   16)) {
 				rte_memcpy(filter->src_ipaddr_mask,
-					   ipv6_mask->hdr.src_addr, 16);
+					   &ipv6_mask->hdr.src_addr, 16);
 				en |= !use_ntuple ? 0 :
 				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
 			}
 
-			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
+			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr.a,
 						   16)) {
 				rte_memcpy(filter->dst_ipaddr_mask,
-					   ipv6_mask->hdr.dst_addr, 16);
+					   &ipv6_mask->hdr.dst_addr, 16);
 				en |= !use_ntuple ? 0 :
 				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
 			}
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 34131f0e35f6..cda1c37124f4 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -689,10 +689,8 @@ ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
 static inline uint32_t
 ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
 {
-	unaligned_uint32_t *word_src_addr =
-		(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
-	unaligned_uint32_t *word_dst_addr =
-		(unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);
+	unaligned_uint32_t *word_src_addr = (unaligned_uint32_t *)&ipv6_hdr->src_addr;
+	unaligned_uint32_t *word_dst_addr = (unaligned_uint32_t *)&ipv6_hdr->dst_addr;
 
 	return (word_src_addr[0] ^ word_dst_addr[0]) ^
 			(word_src_addr[1] ^ word_dst_addr[1]) ^
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index 40d21e694409..b6d169097c1a 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -411,15 +411,15 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
 			      RTE_IPV6_HDR_TC_SHIFT,
 			      tos);
 
-	if (memcmp(val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
+	if (memcmp(&val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
 	    (umask &&
-	     memcmp(umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
+	     memcmp(&umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
 		CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
 				     lip);
 
-	if (memcmp(val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
+	if (memcmp(&val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
 	    (umask &&
-	     memcmp(umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
+	     memcmp(&umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
 		CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
 				     fip);
 
@@ -918,10 +918,8 @@ static struct chrte_fparse parseitem[] = {
 		.fptr  = ch_rte_parsetype_ipv6,
 		.dmask = &(const struct rte_flow_item_ipv6) {
 			.hdr = {
-				.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-				.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+				.src_addr = RTE_IPV6_MASK_FULL,
+				.dst_addr = RTE_IPV6_MASK_FULL,
 				.vtc_flow = RTE_BE32(0xff000000),
 			},
 		},
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index 1b55d8dd173b..54b17e97c031 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -117,10 +117,8 @@ static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
 
 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
 	.hdr = {
-		.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-		.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.src_addr = RTE_IPV6_MASK_FULL,
+		.dst_addr = RTE_IPV6_MASK_FULL,
 		.proto = 0xff
 	},
 };
@@ -1478,16 +1476,16 @@ dpaa2_configure_flow_generic_ip(
 		mask_ipv4->hdr.dst_addr)) {
 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
 	} else if (mask_ipv6 &&
-		(memcmp((const char *)mask_ipv6->hdr.src_addr,
+		(memcmp(&mask_ipv6->hdr.src_addr,
 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
-		memcmp((const char *)mask_ipv6->hdr.dst_addr,
+		memcmp(&mask_ipv6->hdr.dst_addr,
 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
 	}
 
 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
 		(mask_ipv6 &&
-			memcmp((const char *)mask_ipv6->hdr.src_addr,
+			memcmp(&mask_ipv6->hdr.src_addr,
 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
 		index = dpaa2_flow_extract_search(
 				&priv->extract.qos_key_extract.dpkg,
@@ -1526,13 +1524,13 @@ dpaa2_configure_flow_generic_ip(
 		if (spec_ipv4)
 			key = &spec_ipv4->hdr.src_addr;
 		else
-			key = &spec_ipv6->hdr.src_addr[0];
+			key = &spec_ipv6->hdr.src_addr;
 		if (mask_ipv4) {
 			mask = &mask_ipv4->hdr.src_addr;
 			size = NH_FLD_IPV4_ADDR_SIZE;
 			prot = NET_PROT_IPV4;
 		} else {
-			mask = &mask_ipv6->hdr.src_addr[0];
+			mask = &mask_ipv6->hdr.src_addr;
 			size = NH_FLD_IPV6_ADDR_SIZE;
 			prot = NET_PROT_IPV6;
 		}
@@ -1569,7 +1567,7 @@ dpaa2_configure_flow_generic_ip(
 
 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
 		(mask_ipv6 &&
-			memcmp((const char *)mask_ipv6->hdr.dst_addr,
+			memcmp(&mask_ipv6->hdr.dst_addr,
 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
 		index = dpaa2_flow_extract_search(
 				&priv->extract.qos_key_extract.dpkg,
@@ -1616,13 +1614,13 @@ dpaa2_configure_flow_generic_ip(
 		if (spec_ipv4)
 			key = &spec_ipv4->hdr.dst_addr;
 		else
-			key = spec_ipv6->hdr.dst_addr;
+			key = &spec_ipv6->hdr.dst_addr;
 		if (mask_ipv4) {
 			mask = &mask_ipv4->hdr.dst_addr;
 			size = NH_FLD_IPV4_ADDR_SIZE;
 			prot = NET_PROT_IPV4;
 		} else {
-			mask = &mask_ipv6->hdr.dst_addr[0];
+			mask = &mask_ipv6->hdr.dst_addr;
 			size = NH_FLD_IPV6_ADDR_SIZE;
 			prot = NET_PROT_IPV6;
 		}
diff --git a/drivers/net/hinic/hinic_pmd_flow.c b/drivers/net/hinic/hinic_pmd_flow.c
index d1a564a16303..8fdd5a35be9f 100644
--- a/drivers/net/hinic/hinic_pmd_flow.c
+++ b/drivers/net/hinic/hinic_pmd_flow.c
@@ -962,7 +962,7 @@ static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
 
 		/* check ipv6 src addr mask, ipv6 src addr is 16 bytes */
 		for (i = 0; i < 16; i++) {
-			if (ipv6_mask->hdr.src_addr[i] == UINT8_MAX) {
+			if (ipv6_mask->hdr.src_addr.a[i] == UINT8_MAX) {
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM, item,
 					"Not supported by fdir filter, do not support src ipv6");
@@ -978,13 +978,13 @@ static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
 		}
 
 		for (i = 0; i < 16; i++) {
-			if (ipv6_mask->hdr.dst_addr[i] == UINT8_MAX)
+			if (ipv6_mask->hdr.dst_addr.a[i] == UINT8_MAX)
 				rule->mask.dst_ipv6_mask |= 1 << i;
 		}
 
 		ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
 		rte_memcpy(rule->hinic_fdir.dst_ipv6,
-			   ipv6_spec->hdr.dst_addr, 16);
+			   &ipv6_spec->hdr.dst_addr, 16);
 
 		/*
 		 * Check if the next not void item is TCP or UDP or ICMP.
diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c
index f09b1a6e1ea6..22fb0bffafcc 100644
--- a/drivers/net/hinic/hinic_pmd_tx.c
+++ b/drivers/net/hinic/hinic_pmd_tx.c
@@ -743,7 +743,7 @@ hinic_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
 	else
 		psd_hdr.len = ipv6_hdr->payload_len;
 
-	sum = __rte_raw_cksum(ipv6_hdr->src_addr,
+	sum = __rte_raw_cksum(&ipv6_hdr->src_addr,
 		sizeof(ipv6_hdr->src_addr) + sizeof(ipv6_hdr->dst_addr), 0);
 	sum = __rte_raw_cksum(&psd_hdr, sizeof(psd_hdr), sum);
 	return __rte_raw_cksum_reduce(sum);
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 37eb2b4c3807..bf1eee506dde 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -822,10 +822,10 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 						  "Only support src & dst ip,proto in IPV6");
 		}
 		net_addr_to_host(rule->key_conf.mask.src_ip,
-				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
+				 (const rte_be32_t *)&ipv6_mask->hdr.src_addr,
 				 IP_ADDR_LEN);
 		net_addr_to_host(rule->key_conf.mask.dst_ip,
-				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
+				 (const rte_be32_t *)&ipv6_mask->hdr.dst_addr,
 				 IP_ADDR_LEN);
 		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
 		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
@@ -838,10 +838,10 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
 	ipv6_spec = item->spec;
 	net_addr_to_host(rule->key_conf.spec.src_ip,
-			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
+			 (const rte_be32_t *)&ipv6_spec->hdr.src_addr,
 			 IP_ADDR_LEN);
 	net_addr_to_host(rule->key_conf.spec.dst_ip,
-			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
+			 (const rte_be32_t *)&ipv6_spec->hdr.dst_addr,
 			 IP_ADDR_LEN);
 	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 92165c8422d5..c6857727e8be 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1953,13 +1953,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					return -rte_errno;
 				}
 
-				if (!memcmp(ipv6_mask->hdr.src_addr,
+				if (!memcmp(&ipv6_mask->hdr.src_addr,
 					    ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					    sizeof(ipv6_mask->hdr.src_addr)))
 					input_set |= I40E_INSET_IPV6_SRC;
-				if (!memcmp(ipv6_mask->hdr.dst_addr,
+				if (!memcmp(&ipv6_mask->hdr.dst_addr,
 					    ipv6_addr_mask,
-					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					    sizeof(ipv6_mask->hdr.dst_addr)))
 					input_set |= I40E_INSET_IPV6_DST;
 
 				if ((ipv6_mask->hdr.vtc_flow &
@@ -1987,9 +1987,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					I40E_FDIR_IPTYPE_IPV6;
 
 				rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
-					   ipv6_spec->hdr.src_addr, 16);
+					   &ipv6_spec->hdr.src_addr, 16);
 				rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
-					   ipv6_spec->hdr.dst_addr, 16);
+					   &ipv6_spec->hdr.dst_addr, 16);
 
 				/* Check if it is fragment. */
 				if (ipv6_spec->hdr.proto ==
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 811a10287b70..321346425465 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -1048,14 +1048,14 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 								 HOP_LIMIT);
 			}
 
-			if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
-				    RTE_DIM(ipv6_mask->hdr.src_addr))) {
+			if (!memcmp(&ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+				    sizeof(ipv6_mask->hdr.src_addr))) {
 				input_set |= IAVF_INSET_IPV6_SRC;
 				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
 								 SRC);
 			}
-			if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
-				    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+			if (!memcmp(&ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+				    sizeof(ipv6_mask->hdr.dst_addr))) {
 				input_set |= IAVF_INSET_IPV6_DST;
 				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
 								 DST);
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 74e1e7099b8c..eb5a3feab189 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -354,23 +354,23 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 				}
 
 				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.src_addr[j]) {
+					if (ipv6_mask->hdr.src_addr.a[j]) {
 						*input |= IAVF_INSET_IPV6_SRC;
 						break;
 					}
 				}
 				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.dst_addr[j]) {
+					if (ipv6_mask->hdr.dst_addr.a[j]) {
 						*input |= IAVF_INSET_IPV6_DST;
 						break;
 					}
 				}
 
 				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.src_addr[j])
+					if (ipv6_mask->hdr.src_addr.a[j])
 						input_set_byte++;
 
-					if (ipv6_mask->hdr.dst_addr[j])
+					if (ipv6_mask->hdr.dst_addr.a[j])
 						input_set_byte++;
 				}
 
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
index 6fd45ff45f3d..89dd5af5500f 100644
--- a/drivers/net/iavf/iavf_ipsec_crypto.c
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -1738,8 +1738,8 @@ static void
 parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
 		struct rte_ipv6_hdr *ipv6)
 {
-	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
-	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+	ipv6->src_addr = item->hdr.src_addr;
+	ipv6->dst_addr = item->hdr.dst_addr;
 }
 
 static void
@@ -1904,7 +1904,7 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad,
 			ipsec_flow->spi,
 			0,
 			0,
-			ipsec_flow->ipv6_hdr.dst_addr,
+			ipsec_flow->ipv6_hdr.dst_addr.a,
 			0,
 			ipsec_flow->is_udp,
 			ipsec_flow->udp_hdr.dst_port);
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 741107f93939..406918fed547 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -2097,11 +2097,11 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 				return -rte_errno;
 			}
 
-			if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
-				    RTE_DIM(ipv6_mask->hdr.src_addr)))
+			if (!memcmp(&ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+				    sizeof(ipv6_mask->hdr.src_addr)))
 				*input_set |= ICE_INSET_IPV6_SRC;
-			if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
-				    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+			if (!memcmp(&ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+				    sizeof(ipv6_mask->hdr.dst_addr)))
 				*input_set |= ICE_INSET_IPV6_DST;
 
 			if ((ipv6_mask->hdr.vtc_flow &
@@ -2113,8 +2113,8 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
 				*input_set |= ICE_INSET_IPV6_HOP_LIMIT;
 
-			rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
-			rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
+			rte_memcpy(&p_v6->dst_ip, &ipv6_spec->hdr.dst_addr, 16);
+			rte_memcpy(&p_v6->src_ip, &ipv6_spec->hdr.src_addr, 16);
 			vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
 			p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
 			p_v6->proto = ipv6_spec->hdr.proto;
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 122b87f625a7..28bc775a2c34 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -665,13 +665,13 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
 				}
 
 				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.src_addr[j]) {
+					if (ipv6_mask->hdr.src_addr.a[j]) {
 						*input |= ICE_INSET_IPV6_SRC;
 						break;
 					}
 				}
 				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.dst_addr[j]) {
+					if (ipv6_mask->hdr.dst_addr.a[j]) {
 						*input |= ICE_INSET_IPV6_DST;
 						break;
 					}
@@ -691,18 +691,18 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
 				f = &list[t].h_u.ipv6_hdr;
 				s = &list[t].m_u.ipv6_hdr;
 				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
-					if (ipv6_mask->hdr.src_addr[j]) {
+					if (ipv6_mask->hdr.src_addr.a[j]) {
 						f->src_addr[j] =
-						ipv6_spec->hdr.src_addr[j];
+						ipv6_spec->hdr.src_addr.a[j];
 						s->src_addr[j] =
-						ipv6_mask->hdr.src_addr[j];
+						ipv6_mask->hdr.src_addr.a[j];
 						input_set_byte++;
 					}
-					if (ipv6_mask->hdr.dst_addr[j]) {
+					if (ipv6_mask->hdr.dst_addr.a[j]) {
 						f->dst_addr[j] =
-						ipv6_spec->hdr.dst_addr[j];
+						ipv6_spec->hdr.dst_addr.a[j];
 						s->dst_addr[j] =
-						ipv6_mask->hdr.dst_addr[j];
+						ipv6_mask->hdr.dst_addr.a[j];
 						input_set_byte++;
 					}
 				}
diff --git a/drivers/net/igc/igc_flow.c b/drivers/net/igc/igc_flow.c
index b677a0d61340..b778ac26135a 100644
--- a/drivers/net/igc/igc_flow.c
+++ b/drivers/net/igc/igc_flow.c
@@ -435,8 +435,8 @@ igc_parse_pattern_ipv6(const struct rte_flow_item *item,
 	if (mask->hdr.vtc_flow ||
 		mask->hdr.payload_len ||
 		mask->hdr.hop_limits ||
-		!igc_is_zero_ipv6_addr(mask->hdr.src_addr) ||
-		!igc_is_zero_ipv6_addr(mask->hdr.dst_addr))
+		!igc_is_zero_ipv6_addr(&mask->hdr.src_addr) ||
+		!igc_is_zero_ipv6_addr(&mask->hdr.dst_addr))
 		return rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM, item,
 				"IPv6 only support protocol");
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 687341c6b8d3..1b35ed5faabe 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1917,9 +1917,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
 		/* check src addr mask */
 		for (j = 0; j < 16; j++) {
-			if (ipv6_mask->hdr.src_addr[j] == 0) {
+			if (ipv6_mask->hdr.src_addr.a[j] == 0) {
 				rule->mask.src_ipv6_mask &= ~(1 << j);
-			} else if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX) {
+			} else if (ipv6_mask->hdr.src_addr.a[j] != UINT8_MAX) {
 				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1930,9 +1930,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
 		/* check dst addr mask */
 		for (j = 0; j < 16; j++) {
-			if (ipv6_mask->hdr.dst_addr[j] == 0) {
+			if (ipv6_mask->hdr.dst_addr.a[j] == 0) {
 				rule->mask.dst_ipv6_mask &= ~(1 << j);
-			} else if (ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+			} else if (ipv6_mask->hdr.dst_addr.a[j] != UINT8_MAX) {
 				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1945,9 +1945,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 			rule->b_spec = TRUE;
 			ipv6_spec = item->spec;
 			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
-				   ipv6_spec->hdr.src_addr, 16);
+				   &ipv6_spec->hdr.src_addr, 16);
 			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
-				   ipv6_spec->hdr.dst_addr, 16);
+				   &ipv6_spec->hdr.dst_addr, 16);
 		}
 
 		/**
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 3a666ba15f59..778004cbe4d2 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -681,9 +681,9 @@ ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
 			ic_session->src_ip.type = IPv6;
 			ic_session->dst_ip.type = IPv6;
 			rte_memcpy(ic_session->src_ip.ipv6,
-				   ipv6->hdr.src_addr, 16);
+				   &ipv6->hdr.src_addr, 16);
 			rte_memcpy(ic_session->dst_ip.ipv6,
-				   ipv6->hdr.dst_addr, 16);
+				   &ipv6->hdr.dst_addr, 16);
 		} else {
 			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
 			ic_session->src_ip.type = IPv4;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 10b986d66bd7..a9fa5d06edcc 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -176,14 +176,14 @@ struct mlx5dr_definer_conv_data {
 	X(SET,		ipv6_proto,		v->hdr.proto,		rte_flow_item_ipv6) \
 	X(SET,		ipv6_routing_hdr,	IPPROTO_ROUTING,	rte_flow_item_ipv6) \
 	X(SET,		ipv6_hop_limits,	v->hdr.hop_limits,	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_src_addr_127_96,	&v->hdr.src_addr[0],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_src_addr_95_64,	&v->hdr.src_addr[4],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_src_addr_63_32,	&v->hdr.src_addr[8],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_src_addr_31_0,	&v->hdr.src_addr[12],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_dst_addr_127_96,	&v->hdr.dst_addr[0],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_dst_addr_95_64,	&v->hdr.dst_addr[4],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_dst_addr_63_32,	&v->hdr.dst_addr[8],	rte_flow_item_ipv6) \
-	X(SET_BE32P,	ipv6_dst_addr_31_0,	&v->hdr.dst_addr[12],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_src_addr_127_96,	&v->hdr.src_addr.a[0],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_src_addr_95_64,	&v->hdr.src_addr.a[4],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_src_addr_63_32,	&v->hdr.src_addr.a[8],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_src_addr_31_0,	&v->hdr.src_addr.a[12],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_dst_addr_127_96,	&v->hdr.dst_addr.a[0],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_dst_addr_95_64,	&v->hdr.dst_addr.a[4],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_dst_addr_63_32,	&v->hdr.dst_addr.a[8],	rte_flow_item_ipv6) \
+	X(SET_BE32P,	ipv6_dst_addr_31_0,	&v->hdr.dst_addr.a[12],	rte_flow_item_ipv6) \
 	X(SET,		ipv6_version,		STE_IPV6,		rte_flow_item_ipv6) \
 	X(SET,		ipv6_frag,		v->has_frag_ext,	rte_flow_item_ipv6) \
 	X(SET,		icmp_protocol,		STE_ICMP,		rte_flow_item_icmp) \
@@ -1161,8 +1161,8 @@ mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
 	    m->has_esp_ext || m->has_dest_ext || m->has_mobil_ext ||
 	    m->has_hip_ext || m->has_shim6_ext ||
 	    (l && (l->has_frag_ext || l->hdr.vtc_flow || l->hdr.proto ||
-		   !is_mem_zero(l->hdr.src_addr, 16) ||
-		   !is_mem_zero(l->hdr.dst_addr, 16)))) {
+		   !is_mem_zero(l->hdr.src_addr.a, 16) ||
+		   !is_mem_zero(l->hdr.dst_addr.a, 16)))) {
 		rte_errno = ENOTSUP;
 		return rte_errno;
 	}
@@ -1219,56 +1219,56 @@ mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
 		DR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.src_addr, 4)) {
+	if (!is_mem_zero(m->hdr.src_addr.a, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_127_96, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_127_96_set;
 		DR_CALC_SET(fc, ipv6_src, ipv6_address_127_96, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.src_addr + 4, 4)) {
+	if (!is_mem_zero(m->hdr.src_addr.a + 4, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_95_64, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_95_64_set;
 		DR_CALC_SET(fc, ipv6_src, ipv6_address_95_64, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.src_addr + 8, 4)) {
+	if (!is_mem_zero(m->hdr.src_addr.a + 8, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_63_32, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_63_32_set;
 		DR_CALC_SET(fc, ipv6_src, ipv6_address_63_32, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.src_addr + 12, 4)) {
+	if (!is_mem_zero(m->hdr.src_addr.a + 12, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_31_0, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_31_0_set;
 		DR_CALC_SET(fc, ipv6_src, ipv6_address_31_0, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.dst_addr, 4)) {
+	if (!is_mem_zero(m->hdr.dst_addr.a, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_127_96, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_127_96_set;
 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_127_96, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.dst_addr + 4, 4)) {
+	if (!is_mem_zero(m->hdr.dst_addr.a + 4, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_95_64, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_95_64_set;
 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_95_64, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.dst_addr + 8, 4)) {
+	if (!is_mem_zero(m->hdr.dst_addr.a + 8, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_63_32, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_63_32_set;
 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_63_32, inner);
 	}
 
-	if (!is_mem_zero(m->hdr.dst_addr + 12, 4)) {
+	if (!is_mem_zero(m->hdr.dst_addr.a + 12, 4)) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_31_0, inner)];
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_31_0_set;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index effc61cdc9da..7f8640b488b8 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2933,10 +2933,8 @@ mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev,
 	const struct rte_flow_item_ipv6 *spec = item->spec;
 	const struct rte_flow_item_ipv6 nic_mask = {
 		.hdr = {
-			.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-			.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+			.src_addr = RTE_IPV6_MASK_FULL,
+			.dst_addr = RTE_IPV6_MASK_FULL,
 			.vtc_flow = RTE_BE32(0xffffffff),
 			.proto = 0xff,
 		},
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 5f71573a86d6..201e215e4bad 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7696,10 +7696,8 @@ const struct rte_flow_item_ipv4 nic_ipv4_mask = {
 
 const struct rte_flow_item_ipv6 nic_ipv6_mask = {
 	.hdr = {
-		.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-		.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.src_addr = RTE_IPV6_MASK_FULL,
+		.dst_addr = RTE_IPV6_MASK_FULL,
 		.vtc_flow = RTE_BE32(0xffffffff),
 		.proto = 0xff,
 		.hop_limits = 0xff,
@@ -9548,10 +9546,8 @@ flow_dv_translate_item_ipv6(void *key, const struct rte_flow_item *item,
 	const struct rte_flow_item_ipv6 *ipv6_v;
 	const struct rte_flow_item_ipv6 nic_mask = {
 		.hdr = {
-			.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-			.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+			.src_addr = RTE_IPV6_MASK_FULL,
+			.dst_addr = RTE_IPV6_MASK_FULL,
 			.vtc_flow = RTE_BE32(0xffffffff),
 			.proto = 0xff,
 			.hop_limits = 0xff,
@@ -9574,11 +9570,11 @@ flow_dv_translate_item_ipv6(void *key, const struct rte_flow_item *item,
 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
 	for (i = 0; i < size; ++i)
-		l24_v[i] = ipv6_m->hdr.dst_addr[i] & ipv6_v->hdr.dst_addr[i];
+		l24_v[i] = ipv6_m->hdr.dst_addr.a[i] & ipv6_v->hdr.dst_addr.a[i];
 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
 	for (i = 0; i < size; ++i)
-		l24_v[i] = ipv6_m->hdr.src_addr[i] & ipv6_v->hdr.src_addr[i];
+		l24_v[i] = ipv6_m->hdr.src_addr.a[i] & ipv6_v->hdr.src_addr.a[i];
 	/* TOS. */
 	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index c5ddd1d40433..0084f819804f 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -8301,10 +8301,8 @@ const struct rte_flow_item_ipv6 hws_nic_ipv6_mask = {
 		.payload_len = RTE_BE16(0xffff),
 		.proto = 0xff,
 		.hop_limits = 0xff,
-		.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-		.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.src_addr = RTE_IPV6_MASK_FULL,
+		.dst_addr = RTE_IPV6_MASK_FULL,
 	},
 	.has_frag_ext = 1,
 };
@@ -14741,10 +14739,10 @@ flow_hw_calc_encap_hash(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			memcpy(data.dst.ipv6_addr,
-			       ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
+			       &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
 			       sizeof(data.dst.ipv6_addr));
 			memcpy(data.src.ipv6_addr,
-			       ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
+			       &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
 			       sizeof(data.src.ipv6_addr));
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 3a4356c0f650..5b4a4eda3bbc 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -600,13 +600,13 @@ flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
 		uint32_t vtc_flow_val;
 		uint32_t vtc_flow_mask;
 
-		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
+		memcpy(&ipv6.val.src_ip, &spec->hdr.src_addr,
 		       RTE_DIM(ipv6.val.src_ip));
-		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
+		memcpy(&ipv6.val.dst_ip, &spec->hdr.dst_addr,
 		       RTE_DIM(ipv6.val.dst_ip));
-		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
+		memcpy(&ipv6.mask.src_ip, &mask->hdr.src_addr,
 		       RTE_DIM(ipv6.mask.src_ip));
-		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
+		memcpy(&ipv6.mask.dst_ip, &mask->hdr.dst_addr,
 		       RTE_DIM(ipv6.mask.dst_ip));
 		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
 		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
diff --git a/drivers/net/mvpp2/mrvl_flow.c b/drivers/net/mvpp2/mrvl_flow.c
index e74a5f83f55b..098523ada653 100644
--- a/drivers/net/mvpp2/mrvl_flow.c
+++ b/drivers/net/mvpp2/mrvl_flow.c
@@ -536,27 +536,23 @@ mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
 	       int parse_dst, struct rte_flow *flow)
 {
 	struct pp2_cls_rule_key_field *key_field;
-	int size = sizeof(spec->hdr.dst_addr);
-	struct in6_addr k, m;
+	struct rte_ipv6_addr k, m;
 
-	memset(&k, 0, sizeof(k));
 	if (parse_dst) {
-		memcpy(k.s6_addr, spec->hdr.dst_addr, size);
-		memcpy(m.s6_addr, mask->hdr.dst_addr, size);
-
+		k = spec->hdr.dst_addr;
+		m = mask->hdr.dst_addr;
 		flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
 			MV_NET_IP6_F_DA;
 	} else {
-		memcpy(k.s6_addr, spec->hdr.src_addr, size);
-		memcpy(m.s6_addr, mask->hdr.src_addr, size);
-
+		k = spec->hdr.src_addr;
+		m = mask->hdr.src_addr;
 		flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
 			MV_NET_IP6_F_SA;
 	}
 
 	key_field = &flow->rule.fields[flow->rule.num_fields];
 	mrvl_alloc_key_mask(key_field);
-	key_field->size = 16;
+	key_field->size = RTE_IPV6_ADDR_SIZE;
 
 	inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
 	inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
diff --git a/drivers/net/nfp/flower/nfp_flower_flow.c b/drivers/net/nfp/flower/nfp_flower_flow.c
index e94c7e22e371..43574afea8ac 100644
--- a/drivers/net/nfp/flower/nfp_flower_flow.c
+++ b/drivers/net/nfp/flower/nfp_flower_flow.c
@@ -2066,18 +2066,18 @@ nfp_flow_merge_ipv6(struct nfp_flow_merge_param *param)
 
 			ipv6_gre_tun->ip_ext.tos = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT;
 			ipv6_gre_tun->ip_ext.ttl = hdr->hop_limits;
-			memcpy(ipv6_gre_tun->ipv6.ipv6_src, hdr->src_addr,
+			memcpy(ipv6_gre_tun->ipv6.ipv6_src, &hdr->src_addr,
 					sizeof(ipv6_gre_tun->ipv6.ipv6_src));
-			memcpy(ipv6_gre_tun->ipv6.ipv6_dst, hdr->dst_addr,
+			memcpy(ipv6_gre_tun->ipv6.ipv6_dst, &hdr->dst_addr,
 					sizeof(ipv6_gre_tun->ipv6.ipv6_dst));
 		} else {
 			ipv6_udp_tun = (struct nfp_flower_ipv6_udp_tun *)(*param->mbuf_off);
 
 			ipv6_udp_tun->ip_ext.tos = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT;
 			ipv6_udp_tun->ip_ext.ttl = hdr->hop_limits;
-			memcpy(ipv6_udp_tun->ipv6.ipv6_src, hdr->src_addr,
+			memcpy(ipv6_udp_tun->ipv6.ipv6_src, &hdr->src_addr,
 					sizeof(ipv6_udp_tun->ipv6.ipv6_src));
-			memcpy(ipv6_udp_tun->ipv6.ipv6_dst, hdr->dst_addr,
+			memcpy(ipv6_udp_tun->ipv6.ipv6_dst, &hdr->dst_addr,
 					sizeof(ipv6_udp_tun->ipv6.ipv6_dst));
 		}
 	} else {
@@ -2100,8 +2100,8 @@ nfp_flow_merge_ipv6(struct nfp_flow_merge_param *param)
 		ipv6->ip_ext.tos   = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT;
 		ipv6->ip_ext.proto = hdr->proto;
 		ipv6->ip_ext.ttl   = hdr->hop_limits;
-		memcpy(ipv6->ipv6_src, hdr->src_addr, sizeof(ipv6->ipv6_src));
-		memcpy(ipv6->ipv6_dst, hdr->dst_addr, sizeof(ipv6->ipv6_dst));
+		memcpy(ipv6->ipv6_src, &hdr->src_addr, sizeof(ipv6->ipv6_src));
+		memcpy(ipv6->ipv6_dst, &hdr->dst_addr, sizeof(ipv6->ipv6_dst));
 
 ipv6_end:
 		*param->mbuf_off += sizeof(struct nfp_flower_ipv6);
@@ -2557,10 +2557,8 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 				.vtc_flow   = RTE_BE32(0x0ff00000),
 				.proto      = 0xff,
 				.hop_limits = 0xff,
-				.src_addr   = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-						0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-				.dst_addr   = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-						0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+				.src_addr   = RTE_IPV6_MASK_FULL,
+				.dst_addr   = RTE_IPV6_MASK_FULL,
 			},
 			.has_frag_ext = 1,
 		},
@@ -3363,8 +3361,8 @@ nfp_flower_add_tun_neigh_v6_encap(struct nfp_app_fw_flower *app_fw_flower,
 	struct nfp_flower_cmsg_tun_neigh_v6 payload;
 
 	tun->payload.v6_flag = 1;
-	memcpy(tun->payload.dst.dst_ipv6, ipv6->hdr.dst_addr, sizeof(tun->payload.dst.dst_ipv6));
-	memcpy(tun->payload.src.src_ipv6, ipv6->hdr.src_addr, sizeof(tun->payload.src.src_ipv6));
+	memcpy(tun->payload.dst.dst_ipv6, &ipv6->hdr.dst_addr, sizeof(tun->payload.dst.dst_ipv6));
+	memcpy(tun->payload.src.src_ipv6, &ipv6->hdr.src_addr, sizeof(tun->payload.src.src_ipv6));
 	memcpy(tun->payload.dst_addr, eth->dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
 	memcpy(tun->payload.src_addr, eth->src_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
 
@@ -3384,8 +3382,8 @@ nfp_flower_add_tun_neigh_v6_encap(struct nfp_app_fw_flower *app_fw_flower,
 			sizeof(struct nfp_flower_meta_tci));
 
 	memset(&payload, 0, sizeof(struct nfp_flower_cmsg_tun_neigh_v6));
-	memcpy(payload.dst_ipv6, ipv6->hdr.dst_addr, sizeof(payload.dst_ipv6));
-	memcpy(payload.src_ipv6, ipv6->hdr.src_addr, sizeof(payload.src_ipv6));
+	memcpy(payload.dst_ipv6, &ipv6->hdr.dst_addr, sizeof(payload.dst_ipv6));
+	memcpy(payload.src_ipv6, &ipv6->hdr.src_addr, sizeof(payload.src_ipv6));
 	memcpy(payload.common.dst_mac, eth->dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
 	memcpy(payload.common.src_mac, eth->src_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
 	payload.common.port_id = port->in_port;
@@ -3612,7 +3610,7 @@ nfp_flow_action_vxlan_encap_v6(struct nfp_app_fw_flower *app_fw_flower,
 
 	pre_tun = (struct nfp_fl_act_pre_tun *)actions;
 	memset(pre_tun, 0, act_pre_size);
-	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr);
+	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr.a);
 
 	set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size);
 	memset(set_tun, 0, act_set_size);
@@ -3982,7 +3980,7 @@ nfp_flow_action_geneve_encap_v6(struct nfp_app_fw_flower *app_fw_flower,
 
 	pre_tun = (struct nfp_fl_act_pre_tun *)actions;
 	memset(pre_tun, 0, act_pre_size);
-	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr);
+	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr.a);
 
 	set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size);
 	memset(set_tun, 0, act_set_size);
@@ -4059,7 +4057,7 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower,
 
 	pre_tun = (struct nfp_fl_act_pre_tun *)actions;
 	memset(pre_tun, 0, act_pre_size);
-	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr);
+	nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr.a);
 
 	set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size);
 	memset(set_tun, 0, act_set_size);
diff --git a/drivers/net/nfp/nfp_net_flow.c b/drivers/net/nfp/nfp_net_flow.c
index e9f0ce37109a..d72f6ce84c44 100644
--- a/drivers/net/nfp/nfp_net_flow.c
+++ b/drivers/net/nfp/nfp_net_flow.c
@@ -297,28 +297,28 @@ nfp_net_flow_merge_ipv6(struct rte_flow *nfp_flow,
 
 	ipv6->l4_protocol_mask = mask->hdr.proto;
 	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
-		ipv6->src_ipv6_mask[i] = mask->hdr.src_addr[i + 3];
-		ipv6->src_ipv6_mask[i + 1] = mask->hdr.src_addr[i + 2];
-		ipv6->src_ipv6_mask[i + 2] = mask->hdr.src_addr[i + 1];
-		ipv6->src_ipv6_mask[i + 3] = mask->hdr.src_addr[i];
+		ipv6->src_ipv6_mask[i] = mask->hdr.src_addr.a[i + 3];
+		ipv6->src_ipv6_mask[i + 1] = mask->hdr.src_addr.a[i + 2];
+		ipv6->src_ipv6_mask[i + 2] = mask->hdr.src_addr.a[i + 1];
+		ipv6->src_ipv6_mask[i + 3] = mask->hdr.src_addr.a[i];
 
-		ipv6->dst_ipv6_mask[i] = mask->hdr.dst_addr[i + 3];
-		ipv6->dst_ipv6_mask[i + 1] = mask->hdr.dst_addr[i + 2];
-		ipv6->dst_ipv6_mask[i + 2] = mask->hdr.dst_addr[i + 1];
-		ipv6->dst_ipv6_mask[i + 3] = mask->hdr.dst_addr[i];
+		ipv6->dst_ipv6_mask[i] = mask->hdr.dst_addr.a[i + 3];
+		ipv6->dst_ipv6_mask[i + 1] = mask->hdr.dst_addr.a[i + 2];
+		ipv6->dst_ipv6_mask[i + 2] = mask->hdr.dst_addr.a[i + 1];
+		ipv6->dst_ipv6_mask[i + 3] = mask->hdr.dst_addr.a[i];
 	}
 
 	ipv6->l4_protocol = spec->hdr.proto;
 	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
-		ipv6->src_ipv6[i] = spec->hdr.src_addr[i + 3];
-		ipv6->src_ipv6[i + 1] = spec->hdr.src_addr[i + 2];
-		ipv6->src_ipv6[i + 2] = spec->hdr.src_addr[i + 1];
-		ipv6->src_ipv6[i + 3] = spec->hdr.src_addr[i];
+		ipv6->src_ipv6[i] = spec->hdr.src_addr.a[i + 3];
+		ipv6->src_ipv6[i + 1] = spec->hdr.src_addr.a[i + 2];
+		ipv6->src_ipv6[i + 2] = spec->hdr.src_addr.a[i + 1];
+		ipv6->src_ipv6[i + 3] = spec->hdr.src_addr.a[i];
 
-		ipv6->dst_ipv6[i] = spec->hdr.dst_addr[i + 3];
-		ipv6->dst_ipv6[i + 1] = spec->hdr.dst_addr[i + 2];
-		ipv6->dst_ipv6[i + 2] = spec->hdr.dst_addr[i + 1];
-		ipv6->dst_ipv6[i + 3] = spec->hdr.dst_addr[i];
+		ipv6->dst_ipv6[i] = spec->hdr.dst_addr.a[i + 3];
+		ipv6->dst_ipv6[i + 1] = spec->hdr.dst_addr.a[i + 2];
+		ipv6->dst_ipv6[i + 2] = spec->hdr.dst_addr.a[i + 1];
+		ipv6->dst_ipv6[i + 3] = spec->hdr.dst_addr.a[i];
 	}
 
 	return 0;
@@ -406,10 +406,8 @@ static const struct nfp_net_flow_item_proc nfp_net_flow_item_proc_list[] = {
 		.mask_support = &(const struct rte_flow_item_ipv6){
 			.hdr = {
 				.proto    = 0xff,
-				.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-				.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+				.src_addr = RTE_IPV6_MASK_FULL,
+				.dst_addr = RTE_IPV6_MASK_FULL,
 			},
 		},
 		.mask_default = &rte_flow_item_ipv6_mask,
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index d98266eac55c..14fb4338e9c7 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -794,9 +794,9 @@ qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
 
 				spec = pattern->spec;
 				memcpy(flow->entry.tuple.src_ipv6,
-				       spec->hdr.src_addr, IPV6_ADDR_LEN);
+				       &spec->hdr.src_addr, IPV6_ADDR_LEN);
 				memcpy(flow->entry.tuple.dst_ipv6,
-				       spec->hdr.dst_addr, IPV6_ADDR_LEN);
+				       &spec->hdr.dst_addr, IPV6_ADDR_LEN);
 				flow->entry.tuple.eth_proto =
 					RTE_ETHER_TYPE_IPV6;
 			}
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 1b50aefe5c48..1006243539b5 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -575,14 +575,8 @@ sfc_flow_parse_ipv6(const struct rte_flow_item *item,
 	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
 	const struct rte_flow_item_ipv6 supp_mask = {
 		.hdr = {
-			.src_addr = { 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff },
-			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff,
-				      0xff, 0xff, 0xff, 0xff },
+			.src_addr = RTE_IPV6_MASK_FULL,
+			.dst_addr = RTE_IPV6_MASK_FULL,
 			.proto = 0xff,
 		}
 	};
@@ -618,28 +612,28 @@ sfc_flow_parse_ipv6(const struct rte_flow_item *item,
 	 * IPv6 addresses are in big-endian byte order in item and in
 	 * efx_spec
 	 */
-	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
+	if (memcmp(&mask->hdr.src_addr, &supp_mask.hdr.src_addr,
 		   sizeof(mask->hdr.src_addr)) == 0) {
 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
 
 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
 				 sizeof(spec->hdr.src_addr));
-		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
+		rte_memcpy(&efx_spec->efs_rem_host, &spec->hdr.src_addr,
 			   sizeof(efx_spec->efs_rem_host));
-	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
+	} else if (!sfc_flow_is_zero(mask->hdr.src_addr.a,
 				     sizeof(mask->hdr.src_addr))) {
 		goto fail_bad_mask;
 	}
 
-	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
+	if (memcmp(&mask->hdr.dst_addr, &supp_mask.hdr.dst_addr,
 		   sizeof(mask->hdr.dst_addr)) == 0) {
 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
 
 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
 				 sizeof(spec->hdr.dst_addr));
-		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
+		rte_memcpy(&efx_spec->efs_loc_host, &spec->hdr.dst_addr,
 			   sizeof(efx_spec->efs_loc_host));
-	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
+	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr.a,
 				     sizeof(mask->hdr.dst_addr))) {
 		goto fail_bad_mask;
 	}
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index 5ae1faf9165d..51ec07eb5acd 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -209,10 +209,8 @@ static const struct tap_flow_items tap_flow_items[] = {
 			       RTE_FLOW_ITEM_TYPE_TCP),
 		.mask = &(const struct rte_flow_item_ipv6){
 			.hdr = {
-				.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-				.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-					      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+				.src_addr = RTE_IPV6_MASK_FULL,
+				.dst_addr = RTE_IPV6_MASK_FULL,
 				.proto = -1,
 			},
 		},
@@ -613,13 +611,13 @@ tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
 		info->eth_type = htons(ETH_P_IPV6);
 	if (!spec)
 		return 0;
-	if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
+	if (memcmp(&mask->hdr.dst_addr, empty_addr, 16)) {
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
 			   sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
 			   sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
 	}
-	if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
+	if (memcmp(&mask->hdr.src_addr, empty_addr, 16)) {
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
 			   sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 7ef52d0b0fcd..5d2dd453687c 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1807,9 +1807,9 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
 
 		/* check src addr mask */
 		for (j = 0; j < 16; j++) {
-			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+			if (ipv6_mask->hdr.src_addr.a[j] == UINT8_MAX) {
 				rule->mask.src_ipv6_mask |= 1 << j;
-			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
+			} else if (ipv6_mask->hdr.src_addr.a[j] != 0) {
 				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1820,9 +1820,9 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
 
 		/* check dst addr mask */
 		for (j = 0; j < 16; j++) {
-			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+			if (ipv6_mask->hdr.dst_addr.a[j] == UINT8_MAX) {
 				rule->mask.dst_ipv6_mask |= 1 << j;
-			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+			} else if (ipv6_mask->hdr.dst_addr.a[j] != 0) {
 				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1835,9 +1835,9 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
 			rule->b_spec = TRUE;
 			ipv6_spec = item->spec;
 			rte_memcpy(rule->input.src_ip,
-				   ipv6_spec->hdr.src_addr, 16);
+				   &ipv6_spec->hdr.src_addr, 16);
 			rte_memcpy(rule->input.dst_ip,
-				   ipv6_spec->hdr.dst_addr, 16);
+				   &ipv6_spec->hdr.dst_addr, 16);
 		}
 
 		/**
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index 4af49dd802d0..65b6c251c684 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -659,9 +659,9 @@ txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
 			ic_session->src_ip.type = IPv6;
 			ic_session->dst_ip.type = IPv6;
 			rte_memcpy(ic_session->src_ip.ipv6,
-				   ipv6->hdr.src_addr, 16);
+				   &ipv6->hdr.src_addr, 16);
 			rte_memcpy(ic_session->dst_ip.ipv6,
-				   ipv6->hdr.dst_addr, 16);
+				   &ipv6->hdr.dst_addr, 16);
 		} else {
 			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
 			ic_session->src_ip.type = IPv4;
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 736eae6f05ee..4c0fa5054a2e 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -311,7 +311,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
 		ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv6_hdr *);
 
 		/* Find destination port */
-		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
+		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr.a,
 						&next_hop) == 0 &&
 				(enabled_port_mask & 1 << next_hop) != 0) {
 			port_out = next_hop;
diff --git a/examples/ip_pipeline/pipeline.c b/examples/ip_pipeline/pipeline.c
index 63352257c6e9..792aab0059e9 100644
--- a/examples/ip_pipeline/pipeline.c
+++ b/examples/ip_pipeline/pipeline.c
@@ -637,7 +637,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 1,
 		.input_index = 1,
-		.offset = offsetof(struct rte_ipv6_hdr, src_addr[0]),
+		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[0]),
 	},
 
 	[2] = {
@@ -645,7 +645,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 2,
 		.input_index = 2,
-		.offset = offsetof(struct rte_ipv6_hdr, src_addr[4]),
+		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[4]),
 	},
 
 	[3] = {
@@ -653,7 +653,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 3,
 		.input_index = 3,
-		.offset = offsetof(struct rte_ipv6_hdr, src_addr[8]),
+		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[8]),
 	},
 
 	[4] = {
@@ -661,7 +661,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 4,
 		.input_index = 4,
-		.offset = offsetof(struct rte_ipv6_hdr, src_addr[12]),
+		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[12]),
 	},
 
 	/* Destination IP address (IPv6) */
@@ -670,7 +670,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 5,
 		.input_index = 5,
-		.offset = offsetof(struct rte_ipv6_hdr, dst_addr[0]),
+		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[0]),
 	},
 
 	[6] = {
@@ -678,7 +678,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 6,
 		.input_index = 6,
-		.offset = offsetof(struct rte_ipv6_hdr, dst_addr[4]),
+		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[4]),
 	},
 
 	[7] = {
@@ -686,7 +686,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 7,
 		.input_index = 7,
-		.offset = offsetof(struct rte_ipv6_hdr, dst_addr[8]),
+		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[8]),
 	},
 
 	[8] = {
@@ -694,7 +694,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
 		.size = sizeof(uint32_t),
 		.field_index = 8,
 		.input_index = 8,
-		.offset = offsetof(struct rte_ipv6_hdr, dst_addr[12]),
+		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[12]),
 	},
 
 	/* Source Port */
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index c7019078f7b4..4da692eb23e6 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -400,7 +400,7 @@ reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
 		}
 
 		/* Find destination port */
-		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
+		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr.a,
 						&next_hop) == 0 &&
 				(enabled_port_mask & 1 << next_hop) != 0) {
 			dst_port = next_hop;
diff --git a/examples/ipsec-secgw/flow.c b/examples/ipsec-secgw/flow.c
index 05a62c3020fa..3f7630f5fd53 100644
--- a/examples/ipsec-secgw/flow.c
+++ b/examples/ipsec-secgw/flow.c
@@ -83,29 +83,8 @@ ipv4_addr_cpy(rte_be32_t *spec, rte_be32_t *mask, char *token,
 static void
 ipv6_hdr_print(struct rte_ipv6_hdr *hdr)
 {
-	uint8_t *addr;
-
-	addr = hdr->src_addr;
-	printf("src: %4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx \t",
-	       (uint16_t)((addr[0] << 8) | addr[1]),
-	       (uint16_t)((addr[2] << 8) | addr[3]),
-	       (uint16_t)((addr[4] << 8) | addr[5]),
-	       (uint16_t)((addr[6] << 8) | addr[7]),
-	       (uint16_t)((addr[8] << 8) | addr[9]),
-	       (uint16_t)((addr[10] << 8) | addr[11]),
-	       (uint16_t)((addr[12] << 8) | addr[13]),
-	       (uint16_t)((addr[14] << 8) | addr[15]));
-
-	addr = hdr->dst_addr;
-	printf("dst: %4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx",
-	       (uint16_t)((addr[0] << 8) | addr[1]),
-	       (uint16_t)((addr[2] << 8) | addr[3]),
-	       (uint16_t)((addr[4] << 8) | addr[5]),
-	       (uint16_t)((addr[6] << 8) | addr[7]),
-	       (uint16_t)((addr[8] << 8) | addr[9]),
-	       (uint16_t)((addr[10] << 8) | addr[11]),
-	       (uint16_t)((addr[12] << 8) | addr[13]),
-	       (uint16_t)((addr[14] << 8) | addr[15]));
+	printf("src: " RTE_IPV6_ADDR_FMT " \t", RTE_IPV6_ADDR_SPLIT(&hdr->src_addr));
+	printf("dst: " RTE_IPV6_ADDR_FMT, RTE_IPV6_ADDR_SPLIT(&hdr->dst_addr));
 }
 
 static int
@@ -196,8 +175,8 @@ parse_flow_tokens(char **tokens, uint32_t n_tokens,
 				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
 				if (status->status < 0)
 					return;
-				if (ipv6_addr_cpy(rule->ipv6.spec.hdr.src_addr,
-						  rule->ipv6.mask.hdr.src_addr,
+				if (ipv6_addr_cpy(rule->ipv6.spec.hdr.src_addr.a,
+						  rule->ipv6.mask.hdr.src_addr.a,
 						  tokens[ti], status))
 					return;
 			}
@@ -205,8 +184,8 @@ parse_flow_tokens(char **tokens, uint32_t n_tokens,
 				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
 				if (status->status < 0)
 					return;
-				if (ipv6_addr_cpy(rule->ipv6.spec.hdr.dst_addr,
-						  rule->ipv6.mask.hdr.dst_addr,
+				if (ipv6_addr_cpy(rule->ipv6.spec.hdr.dst_addr.a,
+						  rule->ipv6.mask.hdr.dst_addr.a,
 						  tokens[ti], status))
 					return;
 			}
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index b52b0ffc3d22..ebde28639c12 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -529,9 +529,9 @@ create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
 			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
 			sa->pattern[1].spec = &sa->ipv6_spec;
 
-			memcpy(sa->ipv6_spec.hdr.dst_addr,
+			memcpy(&sa->ipv6_spec.hdr.dst_addr,
 				sa->dst.ip.ip6.ip6_b, 16);
-			memcpy(sa->ipv6_spec.hdr.src_addr,
+			memcpy(&sa->ipv6_spec.hdr.src_addr,
 			       sa->src.ip.ip6.ip6_b, 16);
 		} else if (IS_IP4(sa->flags)) {
 			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
@@ -735,9 +735,9 @@ create_ipsec_esp_flow(struct ipsec_sa *sa)
 		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
 		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
 		sa->pattern[1].spec = &sa->ipv6_spec;
-		memcpy(sa->ipv6_spec.hdr.dst_addr,
+		memcpy(&sa->ipv6_spec.hdr.dst_addr,
 			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
-		memcpy(sa->ipv6_spec.hdr.src_addr,
+		memcpy(&sa->ipv6_spec.hdr.src_addr,
 			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
 		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
 		sa->pattern[2].spec = &sa->esp_spec;
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index c4bac17cd77c..1a0afd2ed2e8 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1571,8 +1571,8 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
 	};
 
 	if (IS_IP6_TUNNEL(lsa->flags)) {
-		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
-		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
+		memcpy(&v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
+		memcpy(&v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
 	}
 
 	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
diff --git a/examples/ipsec-secgw/sad.h b/examples/ipsec-secgw/sad.h
index 3224b6252c8d..fdb1d2ef1790 100644
--- a/examples/ipsec-secgw/sad.h
+++ b/examples/ipsec-secgw/sad.h
@@ -5,6 +5,8 @@
 #ifndef __SAD_H__
 #define __SAD_H__
 
+#include <rte_ip.h>
+#include <rte_ip6.h>
 #include <rte_ipsec_sad.h>
 
 #define SA_CACHE_SZ	128
@@ -37,8 +39,8 @@ cmp_sa_key(struct ipsec_sa *sa, int is_v4, struct rte_ipv4_hdr *ipv4,
 			(sa->dst.ip.ip4 == ipv4->dst_addr)) ||
 			/* IPv6 check */
 			(!is_v4 && (sa_type == IP6_TUNNEL) &&
-			(!memcmp(sa->src.ip.ip6.ip6, ipv6->src_addr, 16)) &&
-			(!memcmp(sa->dst.ip.ip6.ip6, ipv6->dst_addr, 16))))
+			(!memcmp(sa->src.ip.ip6.ip6, &ipv6->src_addr, 16)) &&
+			(!memcmp(sa->dst.ip.ip6.ip6, &ipv6->dst_addr, 16))))
 		return 1;
 
 	return 0;
@@ -128,9 +130,9 @@ sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[],
 				}
 			}
 			v6[nb_v6].spi = esp->spi;
-			memcpy(v6[nb_v6].dip, ipv6->dst_addr,
+			memcpy(v6[nb_v6].dip, &ipv6->dst_addr,
 					sizeof(ipv6->dst_addr));
-			memcpy(v6[nb_v6].sip, ipv6->src_addr,
+			memcpy(v6[nb_v6].sip, &ipv6->src_addr,
 					sizeof(ipv6->src_addr));
 			keys_v6[nb_v6] = (const union rte_ipsec_sad_key *)
 						&v6[nb_v6];
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 993e36cec235..85f862dd5b40 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -65,7 +65,7 @@ fib_parse_packet(struct rte_mbuf *mbuf,
 	/* IPv6 */
 	else {
 		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
-		rte_mov16(ipv6, (const uint8_t *)ipv6_hdr->dst_addr);
+		rte_mov16(ipv6, ipv6_hdr->dst_addr.a);
 		*ip_type = 0;
 		(*ipv6_cnt)++;
 	}
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index e8fd95aae9ce..422fdb70054d 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -62,7 +62,7 @@ lpm_get_ipv6_dst_port(const struct rte_ipv6_hdr *ipv6_hdr,
 		      uint16_t portid,
 		      struct rte_lpm6 *ipv6_l3fwd_lookup_struct)
 {
-	const uint8_t *dst_ip = ipv6_hdr->dst_addr;
+	const uint8_t *dst_ip = ipv6_hdr->dst_addr.a;
 	uint32_t next_hop;
 
 	if (rte_lpm6_lookup(ipv6_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
@@ -122,7 +122,7 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
 
 		return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
-				ipv6_hdr->dst_addr, &next_hop) == 0)
+				ipv6_hdr->dst_addr.a, &next_hop) == 0)
 				? next_hop : portid);
 
 	}
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index 22c5c147d0ea..e8baedcc79d8 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -1005,10 +1005,8 @@ struct rte_flow_item_ipv6 {
 #ifndef __cplusplus
 static const struct rte_flow_item_ipv6 rte_flow_item_ipv6_mask = {
 	.hdr = {
-		.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-		.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.src_addr = RTE_IPV6_MASK_FULL,
+		.dst_addr = RTE_IPV6_MASK_FULL,
 	},
 };
 #endif
diff --git a/lib/hash/rte_thash.h b/lib/hash/rte_thash.h
index ec0c029402fc..eab753a06f3d 100644
--- a/lib/hash/rte_thash.h
+++ b/lib/hash/rte_thash.h
@@ -139,24 +139,24 @@ rte_thash_load_v6_addrs(const struct rte_ipv6_hdr *orig,
 			union rte_thash_tuple *targ)
 {
 #ifdef RTE_ARCH_X86
-	__m128i ipv6 = _mm_loadu_si128((const __m128i *)orig->src_addr);
+	__m128i ipv6 = _mm_loadu_si128((const __m128i *)&orig->src_addr);
 	*(__m128i *)targ->v6.src_addr =
 			_mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
-	ipv6 = _mm_loadu_si128((const __m128i *)orig->dst_addr);
+	ipv6 = _mm_loadu_si128((const __m128i *)&orig->dst_addr);
 	*(__m128i *)targ->v6.dst_addr =
 			_mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
 #elif defined(__ARM_NEON)
-	uint8x16_t ipv6 = vld1q_u8((uint8_t const *)orig->src_addr);
+	uint8x16_t ipv6 = vld1q_u8((uint8_t const *)&orig->src_addr);
 	vst1q_u8((uint8_t *)targ->v6.src_addr, vrev32q_u8(ipv6));
-	ipv6 = vld1q_u8((uint8_t const *)orig->dst_addr);
+	ipv6 = vld1q_u8((uint8_t const *)&orig->dst_addr);
 	vst1q_u8((uint8_t *)targ->v6.dst_addr, vrev32q_u8(ipv6));
 #else
 	int i;
 	for (i = 0; i < 4; i++) {
 		*((uint32_t *)targ->v6.src_addr + i) =
-			rte_be_to_cpu_32(*((const uint32_t *)orig->src_addr + i));
+			rte_be_to_cpu_32(*((const uint32_t *)&orig->src_addr + i));
 		*((uint32_t *)targ->v6.dst_addr + i) =
-			rte_be_to_cpu_32(*((const uint32_t *)orig->dst_addr + i));
+			rte_be_to_cpu_32(*((const uint32_t *)&orig->dst_addr + i));
 	}
 #endif
 }
diff --git a/lib/ip_frag/rte_ipv6_reassembly.c b/lib/ip_frag/rte_ipv6_reassembly.c
index 88863a98d1fe..9471ce5333d7 100644
--- a/lib/ip_frag/rte_ipv6_reassembly.c
+++ b/lib/ip_frag/rte_ipv6_reassembly.c
@@ -143,8 +143,8 @@ rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
 	int32_t ip_len;
 	int32_t trim;
 
-	rte_memcpy(&key.src_dst[0], ip_hdr->src_addr, 16);
-	rte_memcpy(&key.src_dst[2], ip_hdr->dst_addr, 16);
+	rte_memcpy(&key.src_dst[0], &ip_hdr->src_addr, 16);
+	rte_memcpy(&key.src_dst[2], &ip_hdr->dst_addr, 16);
 
 	key.id = frag_hdr->id;
 	key.key_len = IPV6_KEYLEN;
diff --git a/lib/net/rte_ip6.h b/lib/net/rte_ip6.h
index 2420339d1598..25a77277f050 100644
--- a/lib/net/rte_ip6.h
+++ b/lib/net/rte_ip6.h
@@ -256,8 +256,8 @@ struct rte_ipv6_hdr {
 	rte_be16_t payload_len;	/**< IP payload size, including ext. headers */
 	uint8_t  proto;		/**< Protocol, next header. */
 	uint8_t  hop_limits;	/**< Hop limits. */
-	uint8_t  src_addr[16];	/**< IP address of source host. */
-	uint8_t  dst_addr[16];	/**< IP address of destination host(s). */
+	struct rte_ipv6_addr src_addr;	/**< IP address of source host. */
+	struct rte_ipv6_addr dst_addr;	/**< IP address of destination host(s). */
 } __rte_packed;
 
 /* IPv6 routing extension type definition. */
@@ -325,7 +325,7 @@ rte_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
 	else
 		psd_hdr.len = ipv6_hdr->payload_len;
 
-	sum = __rte_raw_cksum(ipv6_hdr->src_addr,
+	sum = __rte_raw_cksum(&ipv6_hdr->src_addr,
 		sizeof(ipv6_hdr->src_addr) + sizeof(ipv6_hdr->dst_addr),
 		0);
 	sum = __rte_raw_cksum(&psd_hdr, sizeof(psd_hdr), sum);
diff --git a/lib/node/ip6_lookup.c b/lib/node/ip6_lookup.c
index 309964f60fd6..6bbcf14e2aa8 100644
--- a/lib/node/ip6_lookup.c
+++ b/lib/node/ip6_lookup.c
@@ -112,28 +112,28 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf0, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[0], ipv6_hdr->dst_addr, 16);
+		rte_memcpy(ip_batch[0], &ipv6_hdr->dst_addr, 16);
 
 		/* Extract DIP of mbuf1 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf1, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf1, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[1], ipv6_hdr->dst_addr, 16);
+		rte_memcpy(ip_batch[1], &ipv6_hdr->dst_addr, 16);
 
 		/* Extract DIP of mbuf2 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf2, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf2, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[2], ipv6_hdr->dst_addr, 16);
+		rte_memcpy(ip_batch[2], &ipv6_hdr->dst_addr, 16);
 
 		/* Extract DIP of mbuf3 */
 		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf3, struct rte_ipv6_hdr *,
 				sizeof(struct rte_ether_hdr));
 		/* Extract hop_limits as ipv6 hdr is in cache */
 		node_mbuf_priv1(mbuf3, dyn)->ttl = ipv6_hdr->hop_limits;
-		rte_memcpy(ip_batch[3], ipv6_hdr->dst_addr, 16);
+		rte_memcpy(ip_batch[3], &ipv6_hdr->dst_addr, 16);
 
 		rte_lpm6_lookup_bulk_func(lpm6, ip_batch, next_hop, 4);
 
@@ -223,7 +223,7 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
 		/* Extract TTL as IPv6 hdr is in cache */
 		node_mbuf_priv1(mbuf0, dyn)->ttl = ipv6_hdr->hop_limits;
 
-		rc = rte_lpm6_lookup(lpm6, ipv6_hdr->dst_addr, &next_hop);
+		rc = rte_lpm6_lookup(lpm6, ipv6_hdr->dst_addr.a, &next_hop);
 		next_hop = (rc == 0) ? next_hop : drop_nh;
 
 		node_mbuf_priv1(mbuf0, dyn)->nh = (uint16_t)next_hop;
diff --git a/lib/pipeline/rte_swx_ipsec.c b/lib/pipeline/rte_swx_ipsec.c
index 73e8211b2818..0ed0ecd134c8 100644
--- a/lib/pipeline/rte_swx_ipsec.c
+++ b/lib/pipeline/rte_swx_ipsec.c
@@ -1386,13 +1386,11 @@ tunnel_ipv6_header_set(struct rte_ipv6_hdr *h, struct rte_swx_ipsec_sa_params *p
 		.payload_len = 0, /* Cannot be pre-computed. */
 		.proto = IPPROTO_ESP,
 		.hop_limits = 64,
-		.src_addr = {0},
-		.dst_addr = {0},
 	};
 
 	memcpy(h, &ipv6_hdr, sizeof(ipv6_hdr));
-	memcpy(h->src_addr, p->encap.tunnel.ipv6.src_addr.s6_addr, 16);
-	memcpy(h->dst_addr, p->encap.tunnel.ipv6.dst_addr.s6_addr, 16);
+	memcpy(&h->src_addr, p->encap.tunnel.ipv6.src_addr.s6_addr, 16);
+	memcpy(&h->dst_addr, p->encap.tunnel.ipv6.dst_addr.s6_addr, 16);
 }
 
 /* IPsec library SA parameters. */
diff --git a/lib/pipeline/rte_table_action.c b/lib/pipeline/rte_table_action.c
index 87c3e0e2c935..c0be656536eb 100644
--- a/lib/pipeline/rte_table_action.c
+++ b/lib/pipeline/rte_table_action.c
@@ -871,10 +871,10 @@ encap_vxlan_apply(void *data,
 			d->ipv6.payload_len = 0; /* not pre-computed */
 			d->ipv6.proto = IP_PROTO_UDP;
 			d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
-			memcpy(d->ipv6.src_addr,
+			memcpy(&d->ipv6.src_addr,
 				p->vxlan.ipv6.sa,
 				sizeof(p->vxlan.ipv6.sa));
-			memcpy(d->ipv6.dst_addr,
+			memcpy(&d->ipv6.dst_addr,
 				p->vxlan.ipv6.da,
 				sizeof(p->vxlan.ipv6.da));
 
@@ -906,10 +906,10 @@ encap_vxlan_apply(void *data,
 			d->ipv6.payload_len = 0; /* not pre-computed */
 			d->ipv6.proto = IP_PROTO_UDP;
 			d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
-			memcpy(d->ipv6.src_addr,
+			memcpy(&d->ipv6.src_addr,
 				p->vxlan.ipv6.sa,
 				sizeof(p->vxlan.ipv6.sa));
-			memcpy(d->ipv6.dst_addr,
+			memcpy(&d->ipv6.dst_addr,
 				p->vxlan.ipv6.da,
 				sizeof(p->vxlan.ipv6.da));
 
@@ -1436,12 +1436,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
 			uint16_t tcp_cksum;
 
 			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
-				(uint16_t *)ip->src_addr,
+				(uint16_t *)&ip->src_addr,
 				(uint16_t *)data->addr,
 				tcp->src_port,
 				data->port);
 
-			rte_memcpy(ip->src_addr, data->addr, 16);
+			rte_memcpy(&ip->src_addr, data->addr, 16);
 			tcp->src_port = data->port;
 			tcp->cksum = tcp_cksum;
 		} else {
@@ -1449,12 +1449,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
 			uint16_t udp_cksum;
 
 			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
-				(uint16_t *)ip->src_addr,
+				(uint16_t *)&ip->src_addr,
 				(uint16_t *)data->addr,
 				udp->src_port,
 				data->port);
 
-			rte_memcpy(ip->src_addr, data->addr, 16);
+			rte_memcpy(&ip->src_addr, data->addr, 16);
 			udp->src_port = data->port;
 			udp->dgram_cksum = udp_cksum;
 		}
@@ -1464,12 +1464,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
 			uint16_t tcp_cksum;
 
 			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
-				(uint16_t *)ip->dst_addr,
+				(uint16_t *)&ip->dst_addr,
 				(uint16_t *)data->addr,
 				tcp->dst_port,
 				data->port);
 
-			rte_memcpy(ip->dst_addr, data->addr, 16);
+			rte_memcpy(&ip->dst_addr, data->addr, 16);
 			tcp->dst_port = data->port;
 			tcp->cksum = tcp_cksum;
 		} else {
@@ -1477,12 +1477,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
 			uint16_t udp_cksum;
 
 			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
-				(uint16_t *)ip->dst_addr,
+				(uint16_t *)&ip->dst_addr,
 				(uint16_t *)data->addr,
 				udp->dst_port,
 				data->port);
 
-			rte_memcpy(ip->dst_addr, data->addr, 16);
+			rte_memcpy(&ip->dst_addr, data->addr, 16);
 			udp->dst_port = data->port;
 			udp->dgram_cksum = udp_cksum;
 		}
-- 
2.47.0
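
The mechanical pattern behind all of the hunks above comes from the lib/net/rte_ip6.h change: the src_addr and dst_addr fields of rte_ipv6_hdr turn from bare uint8_t[16] arrays into struct rte_ipv6_addr values. Call sites that passed the field as a pointer now take its address (the same 16 bytes), byte-wise accesses go through the struct's array member, and open-coded all-0xff initializers become RTE_IPV6_MASK_FULL. A minimal sketch of the before/after access patterns, assuming the struct wraps a single 16-byte member named 'a', as the '.a[j]' uses in the diff indicate:

#include <string.h>
#include <rte_ip6.h> /* struct rte_ipv6_addr, struct rte_ipv6_hdr */

/* Old: memcpy(dst, hdr->dst_addr, 16);
 * New: the field is a struct, so pass its address; the size is unchanged. */
static void
copy_dst(uint8_t dst[16], const struct rte_ipv6_hdr *hdr)
{
	memcpy(dst, &hdr->dst_addr, sizeof(hdr->dst_addr));
}

/* Old: hdr->dst_addr[j]
 * New: byte-wise access goes through the array member. */
static int
dst_is_zero(const struct rte_ipv6_hdr *hdr)
{
	int j;

	for (j = 0; j < RTE_IPV6_ADDR_SIZE; j++)
		if (hdr->dst_addr.a[j] != 0)
			return 0;
	return 1;
}

/* Old: .src_addr = { 0xff, 0xff, ... } spelled out over 16 bytes
 * New: */
static const struct rte_ipv6_addr full_mask = RTE_IPV6_MASK_FULL;

Because the struct wraps exactly 16 bytes, the memcpy()/memcmp() call sites keep their sizes; only the pointer expressions change.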


^ permalink raw reply	[relevance 1%]

* release candidate 24.11-rc1
@ 2024-10-18 21:47  4% Thomas Monjalon
  2024-10-29 10:19  0% ` Xu, HailinX
  2024-10-29 19:31  0% ` Thinh Tran
  0 siblings, 2 replies; 169+ results
From: Thomas Monjalon @ 2024-10-18 21:47 UTC (permalink / raw)
  To: announce

A new DPDK release candidate is ready for testing:
	https://git.dpdk.org/dpdk/tag/?id=v24.11-rc1

There are 630 new patches in this snapshot,
including many API/ABI compatibility breakages.
This release won't be ABI-compatible with previous ones.

Release notes:
	https://doc.dpdk.org/guides/rel_notes/release_24_11.html

Highlights of 24.11-rc1:
	- bit set and atomic bit manipulation
	- IPv6 address API
	- Ethernet link lanes
	- flow table index action
	- Cisco enic VF
	- Marvell CN20K
	- symmetric crypto SM4
	- asymmetric crypto EdDSA
	- event device pre-scheduling
	- event device independent enqueue

Please test and report issues on bugs.dpdk.org.

A few more new APIs may be added in -rc2.
DPDK 24.11-rc2 is expected in more than two weeks (early November).

Thank you everyone



^ permalink raw reply	[relevance 4%]

* RE: DPDK - PCIe Steering Tags Meeting on 10/23/24
  2024-10-17 19:56  3% DPDK - PCIe Steering Tags Meeting on 10/23/24 Wathsala Wathawana Vithanage
@ 2024-10-21  2:05  0% ` Wathsala Wathawana Vithanage
  0 siblings, 0 replies; 169+ results
From: Wathsala Wathawana Vithanage @ 2024-10-21  2:05 UTC (permalink / raw)
  To: dev, Nathan Southern, thomas, Honnappa Nagarahalli; +Cc: nd, nd

Here is the updated RFC https://inbox.dpdk.org/dev/20241021015246.304431-1-wathsala.vithanage@arm.com/#t

Thanks

--wathsala

>
> Subject: DPDK - PCIe Steering Tags Meeting on 10/23/24
> 
> Hi all,
> 
> This is an invitation to discuss adding PCIe steering tags support to DPDK.
> We have had brief conversations about the idea at the DPDK summit.
> Steering tags allow stashing of descriptors and packet data closer to the
> CPUs, potentially yielding lower latency and higher throughput.
> This feature requires contributions from CPU vendors and NIC vendors.
> The goal of the meeting is to present the next version of the API and seek
> support for implementation from other participants in the community.
> 
> I will be sending out the RFC some time this week, so there will be plenty of
> time before the meeting to go over it.
> 
> Agenda:
> - Brief introduction to the feature
> - Introduce the APIs from RFC v2 (this will be submitted to the community
> before the call)
> - Dependencies on kernel support - API for reading steering tags
> - Addressing ABI in advance as patches will not be ready by 24.11
> 
> Please join the call if you are interested in the topic.
> LXF meeting registration link: https://zoom-lfx.platform.linuxfoundation.org/meeting/94917063595?password=77f36625-ad41-4b9c-b067-d33e68c3a29e&invite=true
> 
> Thanks.
> 
> --wathsala
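
For context on the mechanism discussed above: PCIe TLP Processing Hints let a device tag its writes with a steering tag, so the interconnect can stash descriptors and packet data into a cache close to the consuming core instead of memory. The fragment below is purely illustrative; every identifier in it is a placeholder invented for this note and is not DPDK API, nor necessarily what the linked RFC proposes:

#include <stdint.h>

/* Hypothetical names only. The shape of the idea: resolve a
 * (core, cache level) pair to a platform steering tag -- the mail
 * notes this depends on kernel support -- then attach the tag to an
 * Rx queue so the NIC emits it in its PCIe writes. */
struct st_hint {             /* hypothetical */
	uint16_t lcore_id;   /* core that will poll the queue */
	uint8_t cache_level; /* e.g. stash to L2 vs. last-level cache */
};

int st_hint_to_tag(const struct st_hint *hint, uint16_t *tag);      /* hypothetical */
int st_rxq_apply_tag(uint16_t port, uint16_t queue, uint16_t tag);  /* hypothetical */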


^ permalink raw reply	[relevance 0%]

* Re: [RFC v3 00/10] eventdev: remove single-event enqueue and dequeue
  2024-10-17  6:38  3% ` [RFC v3 00/10] eventdev: remove single-event " Mattias Rönnblom
  2024-10-17  6:38 11%   ` [RFC v3 10/10] eventdev: remove single event " Mattias Rönnblom
@ 2024-10-21  7:25  0%   ` Jerin Jacob
  2024-10-21  8:38  0%     ` Mattias Rönnblom
  2024-10-21  8:51  3%   ` [PATCH " Mattias Rönnblom
  2024-10-21  9:06  3%   ` Mattias Rönnblom
  3 siblings, 1 reply; 169+ results
From: Jerin Jacob @ 2024-10-21  7:25 UTC (permalink / raw)
  To: Mattias Rönnblom
  Cc: Jerin Jacob, dev, Mattias Rönnblom, David Marchand,
	Stephen Hemminger, Anoob Joseph, Hemant Agrawal, Sachin Saxena,
	Abdullah Sevincer, Pavan Nikhilesh, Shijith Thotton,
	Harry van Haaren

On Fri, Oct 18, 2024 at 1:14 AM Mattias Rönnblom
<mattias.ronnblom@ericsson.com> wrote:
>
> Remove the single-event enqueue and dequeue functions from the
> eventdev "ops" struct, to reduce complexity, leaving performance
> unaffected.
>
> This ABI change has been announced as a DPDK deprecation notice,
> originally scheduled for DPDK 23.11.
>
> Mattias Rönnblom (9):

Changes look good. Please send the non-RFC version of the series ASAP.
I will merge it for rc2 (rc1 is created now).

>   event/dsw: remove single event enqueue and dequeue
>   event/dlb2: remove single event enqueue and dequeue
>   event/octeontx: remove single event enqueue and dequeue
>   event/sw: remove single event enqueue and dequeue
>   event/dpaa: remove single event enqueue and dequeue
>   event/dpaa2: remove single event enqueue and dequeue
>   event/opdl: remove single event enqueue and dequeue
>   event/skeleton: remove single event enqueue and dequeue
>   eventdev: remove single event enqueue and dequeue
>
> Pavan Nikhilesh (1):
>   event/cnxk: remove single event enqueue and dequeue
>  drivers/event/sw/sw_evdev_worker.c         | 12 ----
>  lib/eventdev/eventdev_pmd.h                |  4 --
>  lib/eventdev/eventdev_private.c            | 22 -------
>  lib/eventdev/rte_eventdev.h                | 21 ++----
>  lib/eventdev/rte_eventdev_core.h           | 11 ----
>  25 files changed, 52 insertions(+), 427 deletions(-)
>
> --
> 2.43.0
>

^ permalink raw reply	[relevance 0%]

* Re: [RFC v3 00/10] eventdev: remove single-event enqueue and dequeue
  2024-10-21  7:25  0%   ` [RFC v3 00/10] eventdev: remove single-event " Jerin Jacob
@ 2024-10-21  8:38  0%     ` Mattias Rönnblom
  0 siblings, 0 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-21  8:38 UTC (permalink / raw)
  To: Jerin Jacob, Mattias Rönnblom
  Cc: Jerin Jacob, dev, David Marchand, Stephen Hemminger,
	Anoob Joseph, Hemant Agrawal, Sachin Saxena, Abdullah Sevincer,
	Pavan Nikhilesh, Shijith Thotton, Harry van Haaren

On 2024-10-21 09:25, Jerin Jacob wrote:
> On Fri, Oct 18, 2024 at 1:14 AM Mattias Rönnblom
> <mattias.ronnblom@ericsson.com> wrote:
>>
>> Remove the single-event enqueue and dequeue functions from the
>> eventdev "ops" struct, to reduce complexity, leaving performance
>> unaffected.
>>
>> This ABI change has been announced as a DPDK deprecation notice,
>> originally scheduled for DPDK 23.11.
>>
>> Mattias Rönnblom (9):
> 
> Changes look good. Please send the non-RFC version of the series ASAP.
> I will merge it for rc2 (rc1 is created now).
> 

Without any more changes? OK.

>>    event/dsw: remove single event enqueue and dequeue
>>    event/dlb2: remove single event enqueue and dequeue
>>    event/octeontx: remove single event enqueue and dequeue
>>    event/sw: remove single event enqueue and dequeue
>>    event/dpaa: remove single event enqueue and dequeue
>>    event/dpaa2: remove single event enqueue and dequeue
>>    event/opdl: remove single event enqueue and dequeue
>>    event/skeleton: remove single event enqueue and dequeue
>>    eventdev: remove single event enqueue and dequeue
>>
>> Pavan Nikhilesh (1):
>>    event/cnxk: remove single event enqueue and dequeue
>>   drivers/event/sw/sw_evdev_worker.c         | 12 ----
>>   lib/eventdev/eventdev_pmd.h                |  4 --
>>   lib/eventdev/eventdev_private.c            | 22 -------
>>   lib/eventdev/rte_eventdev.h                | 21 ++----
>>   lib/eventdev/rte_eventdev_core.h           | 11 ----
>>   25 files changed, 52 insertions(+), 427 deletions(-)
>>
>> --
>> 2.43.0
>>


^ permalink raw reply	[relevance 0%]

* [PATCH 10/10] eventdev: remove single event enqueue and dequeue
  2024-10-21  8:51  3%   ` [PATCH " Mattias Rönnblom
@ 2024-10-21  8:51 11%     ` Mattias Rönnblom
  2024-10-21  9:21  0%     ` [PATCH 00/10] eventdev: remove single-event " Mattias Rönnblom
  1 sibling, 0 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-21  8:51 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: dev, Mattias Rönnblom, David Marchand, Stephen Hemminger,
	Anoob Joseph, Hemant Agrawal, Sachin Saxena, Abdullah Sevincer,
	Pavan Nikhilesh, Shijith Thotton, Harry van Haaren,
	Mattias Rönnblom

Remove the single event enqueue and dequeue, since they did not
provide any noticeable performance benefits.

This is a change of the ABI, previously announced as a deprecation
notice. These functions were not directly invoked by the application,
so the API remains unaffected.
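
A minimal sketch of the unchanged application-side usage (dev_id,
port_id and timeout_ticks are placeholders assumed to be set up
elsewhere):

	#include <rte_eventdev.h>

	struct rte_event ev = {
		.queue_id = 0,
		.op = RTE_EVENT_OP_NEW,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
	};
	struct rte_event deq;
	uint16_t n;

	/* nb_events == 1 now takes the same burst path internally. */
	n = rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
	if (n == 0) {
		/* nothing enqueued; retry or drop */
	}

	n = rte_event_dequeue_burst(dev_id, port_id, &deq, 1, timeout_ticks);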

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--

RFC v3:
 * Update release notes. (Jerin Jacob)
 * Remove single-event enqueue and dequeue function typedefs.
   (Pavan Nikhilesh)
---
 doc/guides/rel_notes/deprecation.rst   |  6 +-----
 doc/guides/rel_notes/release_24_11.rst |  3 +++
 lib/eventdev/eventdev_pmd.h            |  4 ----
 lib/eventdev/eventdev_private.c        | 22 ----------------------
 lib/eventdev/rte_eventdev.h            | 21 ++++-----------------
 lib/eventdev/rte_eventdev_core.h       | 11 -----------
 6 files changed, 8 insertions(+), 59 deletions(-)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 17b7332007..a90b54fc77 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -131,11 +131,7 @@ Deprecation Notices
 
 * eventdev: The single-event (non-burst) enqueue and dequeue operations,
   used by static inline burst enqueue and dequeue functions in ``rte_eventdev.h``,
-  will be removed in DPDK 23.11.
-  This simplification includes changing the layout and potentially also
-  the size of the public ``rte_event_fp_ops`` struct, breaking the ABI.
-  Since these functions are not called directly by the application,
-  the API remains unaffected.
+  are removed in DPDK 24.11.
 
 * pipeline: The pipeline library legacy API (functions rte_pipeline_*)
   will be deprecated and subsequently removed in DPDK 24.11 release.
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index fa4822d928..5461798970 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -401,6 +401,9 @@ ABI Changes
 
 * eventdev: Added ``preschedule_type`` field to ``rte_event_dev_config`` structure.
 
+* eventdev: The PMD single-event enqueue and dequeue function pointers are removed
+  from ``rte_event_fp_ops``.
+
 * graph: To accommodate node specific xstats counters, added ``xstat_cntrs``,
   ``xstat_desc`` and ``xstat_count`` to ``rte_graph_cluster_node_stats``,
   added new structure ``rte_node_xstats`` to ``rte_node_register`` and
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index af855e3467..36148f8d86 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -158,16 +158,12 @@ struct __rte_cache_aligned rte_eventdev {
 	uint8_t attached : 1;
 	/**< Flag indicating the device is attached */
 
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
 	event_enqueue_burst_t enqueue_burst;
 	/**< Pointer to PMD enqueue burst function. */
 	event_enqueue_burst_t enqueue_new_burst;
 	/**< Pointer to PMD enqueue burst function(op new variant) */
 	event_enqueue_burst_t enqueue_forward_burst;
 	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< Pointer to PMD dequeue burst function. */
 	event_maintain_t maintain;
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
index b628f4a69e..6df129fc2d 100644
--- a/lib/eventdev/eventdev_private.c
+++ b/lib/eventdev/eventdev_private.c
@@ -5,15 +5,6 @@
 #include "eventdev_pmd.h"
 #include "rte_eventdev.h"
 
-static uint16_t
-dummy_event_enqueue(__rte_unused void *port,
-		    __rte_unused const struct rte_event *ev)
-{
-	RTE_EDEV_LOG_ERR(
-		"event enqueue requested for unconfigured event device");
-	return 0;
-}
-
 static uint16_t
 dummy_event_enqueue_burst(__rte_unused void *port,
 			  __rte_unused const struct rte_event ev[],
@@ -24,15 +15,6 @@ dummy_event_enqueue_burst(__rte_unused void *port,
 	return 0;
 }
 
-static uint16_t
-dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
-		    __rte_unused uint64_t timeout_ticks)
-{
-	RTE_EDEV_LOG_ERR(
-		"event dequeue requested for unconfigured event device");
-	return 0;
-}
-
 static uint16_t
 dummy_event_dequeue_burst(__rte_unused void *port,
 			  __rte_unused struct rte_event ev[],
@@ -129,11 +111,9 @@ event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
 {
 	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
 	static const struct rte_event_fp_ops dummy = {
-		.enqueue = dummy_event_enqueue,
 		.enqueue_burst = dummy_event_enqueue_burst,
 		.enqueue_new_burst = dummy_event_enqueue_burst,
 		.enqueue_forward_burst = dummy_event_enqueue_burst,
-		.dequeue = dummy_event_dequeue,
 		.dequeue_burst = dummy_event_dequeue_burst,
 		.maintain = dummy_event_maintain,
 		.txa_enqueue = dummy_event_tx_adapter_enqueue,
@@ -153,11 +133,9 @@ void
 event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
 		     const struct rte_eventdev *dev)
 {
-	fp_op->enqueue = dev->enqueue;
 	fp_op->enqueue_burst = dev->enqueue_burst;
 	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
 	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
-	fp_op->dequeue = dev->dequeue;
 	fp_op->dequeue_burst = dev->dequeue_burst;
 	fp_op->maintain = dev->maintain;
 	fp_op->txa_enqueue = dev->txa_enqueue;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index b5c3c16dd0..fabd1490db 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -2596,14 +2596,8 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 	}
 #endif
 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (fp_ops->enqueue)(port, ev);
-	else
-		return fn(port, ev, nb_events);
+
+	return fn(port, ev, nb_events);
 }
 
 /**
@@ -2852,15 +2846,8 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 	}
 #endif
 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (fp_ops->dequeue)(port, ev, timeout_ticks);
-	else
-		return (fp_ops->dequeue_burst)(port, ev, nb_events,
-					       timeout_ticks);
+
+	return (fp_ops->dequeue_burst)(port, ev, nb_events, timeout_ticks);
 }
 
 #define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 2706d5e6c8..1818483044 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -12,18 +12,11 @@
 extern "C" {
 #endif
 
-typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
-/**< @internal Enqueue event on port of a device */
-
 typedef uint16_t (*event_enqueue_burst_t)(void *port,
 					  const struct rte_event ev[],
 					  uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */
 
-typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
-				    uint64_t timeout_ticks);
-/**< @internal Dequeue event from port of a device */
-
 typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
 					  uint16_t nb_events,
 					  uint64_t timeout_ticks);
@@ -60,16 +53,12 @@ typedef void (*event_preschedule_t)(void *port,
 struct __rte_cache_aligned rte_event_fp_ops {
 	void **data;
 	/**< points to array of internal port data pointers */
-	event_enqueue_t enqueue;
-	/**< PMD enqueue function. */
 	event_enqueue_burst_t enqueue_burst;
 	/**< PMD enqueue burst function. */
 	event_enqueue_burst_t enqueue_new_burst;
 	/**< PMD enqueue burst new function. */
 	event_enqueue_burst_t enqueue_forward_burst;
 	/**< PMD enqueue burst fwd function. */
-	event_dequeue_t dequeue;
-	/**< PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< PMD dequeue burst function. */
 	event_maintain_t maintain;
-- 
2.43.0


^ permalink raw reply	[relevance 11%]

* [PATCH 00/10] eventdev: remove single-event enqueue and dequeue
  2024-10-17  6:38  3% ` [RFC v3 00/10] eventdev: remove single-event " Mattias Rönnblom
  2024-10-17  6:38 11%   ` [RFC v3 10/10] eventdev: remove single event " Mattias Rönnblom
  2024-10-21  7:25  0%   ` [RFC v3 00/10] eventdev: remove single-event " Jerin Jacob
@ 2024-10-21  8:51  3%   ` Mattias Rönnblom
  2024-10-21  8:51 11%     ` [PATCH 10/10] eventdev: remove single event " Mattias Rönnblom
  2024-10-21  9:21  0%     ` [PATCH 00/10] eventdev: remove single-event " Mattias Rönnblom
  2024-10-21  9:06  3%   ` Mattias Rönnblom
  3 siblings, 2 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-21  8:51 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: dev, Mattias Rönnblom, David Marchand, Stephen Hemminger,
	Anoob Joseph, Hemant Agrawal, Sachin Saxena, Abdullah Sevincer,
	Pavan Nikhilesh, Shijith Thotton, Harry van Haaren,
	Mattias Rönnblom

Remove the single-event enqueue and dequeue functions from the
eventdev "ops" struct, to reduce complexity, leaving performance
unaffected.

This ABI change has been announced as a DPDK deprecation notice,
originally scheduled for DPDK 23.11.

Mattias Rönnblom (9):
  event/dsw: remove single event enqueue and dequeue
  event/dlb2: remove single event enqueue and dequeue
  event/octeontx: remove single event enqueue and dequeue
  event/sw: remove single event enqueue and dequeue
  event/dpaa: remove single event enqueue and dequeue
  event/dpaa2: remove single event enqueue and dequeue
  event/opdl: remove single event enqueue and dequeue
  event/skeleton: remove single event enqueue and dequeue
  eventdev: remove single event enqueue and dequeue

Pavan Nikhilesh (1):
  event/cnxk: remove single event enqueue and dequeue

 doc/guides/rel_notes/deprecation.rst       |  6 +-
 doc/guides/rel_notes/release_24_11.rst     |  3 +
 drivers/event/cnxk/cn10k_eventdev.c        | 74 ++--------------------
 drivers/event/cnxk/cn10k_worker.c          | 49 +++++++-------
 drivers/event/cnxk/cn10k_worker.h          |  1 -
 drivers/event/cnxk/cn9k_eventdev.c         | 73 +--------------------
 drivers/event/cnxk/cn9k_worker.c           | 26 +++-----
 drivers/event/cnxk/cn9k_worker.h           |  3 -
 drivers/event/dlb2/dlb2.c                  | 40 +-----------
 drivers/event/dpaa/dpaa_eventdev.c         | 27 +-------
 drivers/event/dpaa2/dpaa2_eventdev.c       | 15 -----
 drivers/event/dsw/dsw_evdev.c              |  2 -
 drivers/event/dsw/dsw_evdev.h              |  2 -
 drivers/event/dsw/dsw_event.c              | 12 ----
 drivers/event/octeontx/ssovf_evdev.h       |  1 -
 drivers/event/octeontx/ssovf_worker.c      | 40 ++----------
 drivers/event/opdl/opdl_evdev.c            |  2 -
 drivers/event/skeleton/skeleton_eventdev.c | 29 ---------
 drivers/event/sw/sw_evdev.c                |  2 -
 drivers/event/sw/sw_evdev.h                |  2 -
 drivers/event/sw/sw_evdev_worker.c         | 12 ----
 lib/eventdev/eventdev_pmd.h                |  4 --
 lib/eventdev/eventdev_private.c            | 22 -------
 lib/eventdev/rte_eventdev.h                | 21 ++----
 lib/eventdev/rte_eventdev_core.h           | 11 ----
 25 files changed, 52 insertions(+), 427 deletions(-)

-- 
2.43.0


^ permalink raw reply	[relevance 3%]

* [PATCH 10/10] eventdev: remove single event enqueue and dequeue
  2024-10-21  9:06  3%   ` Mattias Rönnblom
@ 2024-10-21  9:06 11%     ` Mattias Rönnblom
  0 siblings, 0 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-21  9:06 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: dev, Mattias Rönnblom, David Marchand, Stephen Hemminger,
	Anoob Joseph, Hemant Agrawal, Sachin Saxena, Abdullah Sevincer,
	Pavan Nikhilesh, Shijith Thotton, Harry van Haaren,
	Mattias Rönnblom

Remove the single event enqueue and dequeue, since they did not
provide any noticeable performance benefits.

This is a change of the ABI, previously announced as a deprecation
notice. These functions were not directly invoked by the application,
so the API remains unaffected.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>

--

RFC v3:
 * Update release notes. (Jerin Jacob)
 * Remove single-event enqueue and dequeue function typedefs.
   (Pavan Nikhilesh)
---
 doc/guides/rel_notes/deprecation.rst   |  6 +-----
 doc/guides/rel_notes/release_24_11.rst |  3 +++
 lib/eventdev/eventdev_pmd.h            |  4 ----
 lib/eventdev/eventdev_private.c        | 22 ----------------------
 lib/eventdev/rte_eventdev.h            | 21 ++++-----------------
 lib/eventdev/rte_eventdev_core.h       | 11 -----------
 6 files changed, 8 insertions(+), 59 deletions(-)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 17b7332007..a90b54fc77 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -131,11 +131,7 @@ Deprecation Notices
 
 * eventdev: The single-event (non-burst) enqueue and dequeue operations,
   used by static inline burst enqueue and dequeue functions in ``rte_eventdev.h``,
-  will be removed in DPDK 23.11.
-  This simplification includes changing the layout and potentially also
-  the size of the public ``rte_event_fp_ops`` struct, breaking the ABI.
-  Since these functions are not called directly by the application,
-  the API remains unaffected.
+  are removed in DPDK 24.11.
 
 * pipeline: The pipeline library legacy API (functions rte_pipeline_*)
   will be deprecated and subsequently removed in DPDK 24.11 release.
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index fa4822d928..5461798970 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -401,6 +401,9 @@ ABI Changes
 
 * eventdev: Added ``preschedule_type`` field to ``rte_event_dev_config`` structure.
 
+* eventdev: The PMD single-event enqueue and dequeue function pointers are removed
+  from ``rte_event_fp_ops``.
+
 * graph: To accommodate node specific xstats counters, added ``xstat_cntrs``,
   ``xstat_desc`` and ``xstat_count`` to ``rte_graph_cluster_node_stats``,
   added new structure ``rte_node_xstats`` to ``rte_node_register`` and
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index af855e3467..36148f8d86 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -158,16 +158,12 @@ struct __rte_cache_aligned rte_eventdev {
 	uint8_t attached : 1;
 	/**< Flag indicating the device is attached */
 
-	event_enqueue_t enqueue;
-	/**< Pointer to PMD enqueue function. */
 	event_enqueue_burst_t enqueue_burst;
 	/**< Pointer to PMD enqueue burst function. */
 	event_enqueue_burst_t enqueue_new_burst;
 	/**< Pointer to PMD enqueue burst function(op new variant) */
 	event_enqueue_burst_t enqueue_forward_burst;
 	/**< Pointer to PMD enqueue burst function(op forward variant) */
-	event_dequeue_t dequeue;
-	/**< Pointer to PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< Pointer to PMD dequeue burst function. */
 	event_maintain_t maintain;
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
index b628f4a69e..6df129fc2d 100644
--- a/lib/eventdev/eventdev_private.c
+++ b/lib/eventdev/eventdev_private.c
@@ -5,15 +5,6 @@
 #include "eventdev_pmd.h"
 #include "rte_eventdev.h"
 
-static uint16_t
-dummy_event_enqueue(__rte_unused void *port,
-		    __rte_unused const struct rte_event *ev)
-{
-	RTE_EDEV_LOG_ERR(
-		"event enqueue requested for unconfigured event device");
-	return 0;
-}
-
 static uint16_t
 dummy_event_enqueue_burst(__rte_unused void *port,
 			  __rte_unused const struct rte_event ev[],
@@ -24,15 +15,6 @@ dummy_event_enqueue_burst(__rte_unused void *port,
 	return 0;
 }
 
-static uint16_t
-dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
-		    __rte_unused uint64_t timeout_ticks)
-{
-	RTE_EDEV_LOG_ERR(
-		"event dequeue requested for unconfigured event device");
-	return 0;
-}
-
 static uint16_t
 dummy_event_dequeue_burst(__rte_unused void *port,
 			  __rte_unused struct rte_event ev[],
@@ -129,11 +111,9 @@ event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
 {
 	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
 	static const struct rte_event_fp_ops dummy = {
-		.enqueue = dummy_event_enqueue,
 		.enqueue_burst = dummy_event_enqueue_burst,
 		.enqueue_new_burst = dummy_event_enqueue_burst,
 		.enqueue_forward_burst = dummy_event_enqueue_burst,
-		.dequeue = dummy_event_dequeue,
 		.dequeue_burst = dummy_event_dequeue_burst,
 		.maintain = dummy_event_maintain,
 		.txa_enqueue = dummy_event_tx_adapter_enqueue,
@@ -153,11 +133,9 @@ void
 event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
 		     const struct rte_eventdev *dev)
 {
-	fp_op->enqueue = dev->enqueue;
 	fp_op->enqueue_burst = dev->enqueue_burst;
 	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
 	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
-	fp_op->dequeue = dev->dequeue;
 	fp_op->dequeue_burst = dev->dequeue_burst;
 	fp_op->maintain = dev->maintain;
 	fp_op->txa_enqueue = dev->txa_enqueue;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index b5c3c16dd0..fabd1490db 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -2596,14 +2596,8 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 	}
 #endif
 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (fp_ops->enqueue)(port, ev);
-	else
-		return fn(port, ev, nb_events);
+
+	return fn(port, ev, nb_events);
 }
 
 /**
@@ -2852,15 +2846,8 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 	}
 #endif
 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (fp_ops->dequeue)(port, ev, timeout_ticks);
-	else
-		return (fp_ops->dequeue_burst)(port, ev, nb_events,
-					       timeout_ticks);
+
+	return (fp_ops->dequeue_burst)(port, ev, nb_events, timeout_ticks);
 }
 
 #define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 2706d5e6c8..1818483044 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -12,18 +12,11 @@
 extern "C" {
 #endif
 
-typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
-/**< @internal Enqueue event on port of a device */
-
 typedef uint16_t (*event_enqueue_burst_t)(void *port,
 					  const struct rte_event ev[],
 					  uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */
 
-typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
-				    uint64_t timeout_ticks);
-/**< @internal Dequeue event from port of a device */
-
 typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
 					  uint16_t nb_events,
 					  uint64_t timeout_ticks);
@@ -60,16 +53,12 @@ typedef void (*event_preschedule_t)(void *port,
 struct __rte_cache_aligned rte_event_fp_ops {
 	void **data;
 	/**< points to array of internal port data pointers */
-	event_enqueue_t enqueue;
-	/**< PMD enqueue function. */
 	event_enqueue_burst_t enqueue_burst;
 	/**< PMD enqueue burst function. */
 	event_enqueue_burst_t enqueue_new_burst;
 	/**< PMD enqueue burst new function. */
 	event_enqueue_burst_t enqueue_forward_burst;
 	/**< PMD enqueue burst fwd function. */
-	event_dequeue_t dequeue;
-	/**< PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
 	/**< PMD dequeue burst function. */
 	event_maintain_t maintain;
-- 
2.43.0


^ permalink raw reply	[relevance 11%]

* Re: [PATCH 00/10] eventdev: remove single-event enqueue and dequeue
  2024-10-21  8:51  3%   ` [PATCH " Mattias Rönnblom
  2024-10-21  8:51 11%     ` [PATCH 10/10] eventdev: remove single event " Mattias Rönnblom
@ 2024-10-21  9:21  0%     ` Mattias Rönnblom
  1 sibling, 0 replies; 169+ results
From: Mattias Rönnblom @ 2024-10-21  9:21 UTC (permalink / raw)
  To: Mattias Rönnblom, Jerin Jacob
  Cc: dev, David Marchand, Stephen Hemminger, Anoob Joseph,
	Hemant Agrawal, Sachin Saxena, Abdullah Sevincer,
	Pavan Nikhilesh, Shijith Thotton, Harry van Haaren

On 2024-10-21 10:51, Mattias Rönnblom wrote:
> Remove the single-event enqueue and dequeue functions from the
> eventdev "ops" struct, to reduce complexity, leaving performance
> unaffected.
> 
> This ABI change has been announced as a DPDK deprecation notice,
> originally scheduled for DPDK 23.11.
> 

The outgoing SMTP server I'm required to use seems to throw away random 
messages for the moment.

I tried to repost the same patchset, but in that case it threw away the 
cover letter.

Jerin, maybe you can puzzle something together.

> Mattias Rönnblom (9):
>    event/dsw: remove single event enqueue and dequeue
>    event/dlb2: remove single event enqueue and dequeue
>    event/octeontx: remove single event enqueue and dequeue
>    event/sw: remove single event enqueue and dequeue
>    event/dpaa: remove single event enqueue and dequeue
>    event/dpaa2: remove single event enqueue and dequeue
>    event/opdl: remove single event enqueue and dequeue
>    event/skeleton: remove single event enqueue and dequeue
>    eventdev: remove single event enqueue and dequeue
> 
> Pavan Nikhilesh (1):
>    event/cnxk: remove single event enqueue and dequeue
> 
>   doc/guides/rel_notes/deprecation.rst       |  6 +-
>   doc/guides/rel_notes/release_24_11.rst     |  3 +
>   drivers/event/cnxk/cn10k_eventdev.c        | 74 ++--------------------
>   drivers/event/cnxk/cn10k_worker.c          | 49 +++++++-------
>   drivers/event/cnxk/cn10k_worker.h          |  1 -
>   drivers/event/cnxk/cn9k_eventdev.c         | 73 +--------------------
>   drivers/event/cnxk/cn9k_worker.c           | 26 +++-----
>   drivers/event/cnxk/cn9k_worker.h           |  3 -
>   drivers/event/dlb2/dlb2.c                  | 40 +-----------
>   drivers/event/dpaa/dpaa_eventdev.c         | 27 +-------
>   drivers/event/dpaa2/dpaa2_eventdev.c       | 15 -----
>   drivers/event/dsw/dsw_evdev.c              |  2 -
>   drivers/event/dsw/dsw_evdev.h              |  2 -
>   drivers/event/dsw/dsw_event.c              | 12 ----
>   drivers/event/octeontx/ssovf_evdev.h       |  1 -
>   drivers/event/octeontx/ssovf_worker.c      | 40 ++----------
>   drivers/event/opdl/opdl_evdev.c            |  2 -
>   drivers/event/skeleton/skeleton_eventdev.c | 29 ---------
>   drivers/event/sw/sw_evdev.c                |  2 -
>   drivers/event/sw/sw_evdev.h                |  2 -
>   drivers/event/sw/sw_evdev_worker.c         | 12 ----
>   lib/eventdev/eventdev_pmd.h                |  4 --
>   lib/eventdev/eventdev_private.c            | 22 -------
>   lib/eventdev/rte_eventdev.h                | 21 ++----
>   lib/eventdev/rte_eventdev_core.h           | 11 ----
>   25 files changed, 52 insertions(+), 427 deletions(-)
> 


^ permalink raw reply	[relevance 0%]

* [PATCH 00/10] eventdev: remove single-event enqueue and dequeue
  2024-10-17  6:38  3% ` [RFC v3 00/10] eventdev: remove single-event " Mattias Rönnblom
                     ` (2 preceding siblings ...)
  2024-10-21  8:51  3%   ` [PATCH " Mattias Rönnblom
@ 2024-10-21  9:06  3%   ` Mattias Rönnblom
  2024-10-21  9:06 11%     ` [PATCH 10/10] eventdev: remove single event " Mattias Rönnblom
  3 siblings, 1 reply; 169+ results
From: Mattias Rönnblom @ 2024-10-21  9:06 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: dev, Mattias Rönnblom, David Marchand, Stephen Hemminger,
	Anoob Joseph, Hemant Agrawal, Sachin Saxena, Abdullah Sevincer,
	Pavan Nikhilesh, Shijith Thotton, Harry van Haaren,
	Mattias Rönnblom

Remove the single-event enqueue and dequeue functions from the
eventdev "ops" struct, to reduce complexity, leaving performance
unaffected.

This ABI change has been announced as a DPDK deprecation notice,
originally scheduled for DPDK 23.11.

Mattias Rönnblom (9):
  event/dsw: remove single event enqueue and dequeue
  event/dlb2: remove single event enqueue and dequeue
  event/octeontx: remove single event enqueue and dequeue
  event/sw: remove single event enqueue and dequeue
  event/dpaa: remove single event enqueue and dequeue
  event/dpaa2: remove single event enqueue and dequeue
  event/opdl: remove single event enqueue and dequeue
  event/skeleton: remove single event enqueue and dequeue
  eventdev: remove single event enqueue and dequeue

Pavan Nikhilesh (1):
  event/cnxk: remove single event enqueue and dequeue

 doc/guides/rel_notes/deprecation.rst       |  6 +-
 doc/guides/rel_notes/release_24_11.rst     |  3 +
 drivers/event/cnxk/cn10k_eventdev.c        | 74 ++--------------------
 drivers/event/cnxk/cn10k_worker.c          | 49 +++++++-------
 drivers/event/cnxk/cn10k_worker.h          |  1 -
 drivers/event/cnxk/cn9k_eventdev.c         | 73 +--------------------
 drivers/event/cnxk/cn9k_worker.c           | 26 +++-----
 drivers/event/cnxk/cn9k_worker.h           |  3 -
 drivers/event/dlb2/dlb2.c                  | 40 +-----------
 drivers/event/dpaa/dpaa_eventdev.c         | 27 +-------
 drivers/event/dpaa2/dpaa2_eventdev.c       | 15 -----
 drivers/event/dsw/dsw_evdev.c              |  2 -
 drivers/event/dsw/dsw_evdev.h              |  2 -
 drivers/event/dsw/dsw_event.c              | 12 ----
 drivers/event/octeontx/ssovf_evdev.h       |  1 -
 drivers/event/octeontx/ssovf_worker.c      | 40 ++----------
 drivers/event/opdl/opdl_evdev.c            |  2 -
 drivers/event/skeleton/skeleton_eventdev.c | 29 ---------
 drivers/event/sw/sw_evdev.c                |  2 -
 drivers/event/sw/sw_evdev.h                |  2 -
 drivers/event/sw/sw_evdev_worker.c         | 12 ----
 lib/eventdev/eventdev_pmd.h                |  4 --
 lib/eventdev/eventdev_private.c            | 22 -------
 lib/eventdev/rte_eventdev.h                | 21 ++----
 lib/eventdev/rte_eventdev_core.h           | 11 ----
 25 files changed, 52 insertions(+), 427 deletions(-)

-- 
2.43.0


^ permalink raw reply	[relevance 3%]

* [PATCH v11 1/2] power: introduce PM QoS API on CPU wide
  2024-10-21 11:42  4% ` [PATCH v11 " Huisong Li
@ 2024-10-21 11:42  5%   ` Huisong Li
  2024-10-22  9:08  0%     ` Konstantin Ananyev
  0 siblings, 1 reply; 169+ results
From: Huisong Li @ 2024-10-21 11:42 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay-sensitive and expect a low
resume time, such as interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used to set and get the resume latency limit on the cpuX for
userspace. Each cpuidle governor in Linux select which idle state to enter
based on this CPU resume latency in their idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection, limiting it to the shallowest idle state to lower the wakeup
delay, by setting a strict resume latency (zero value).
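
A minimal usage sketch of the two new functions (lcore 1 is only an
example; error handling is trimmed):

	#include <rte_power_qos.h>

	int ret, latency;

	/* Keep lcore 1's CPU in the shallowest idle state. */
	ret = rte_power_qos_set_cpu_resume_latency(1,
			RTE_POWER_QOS_STRICT_LATENCY_VALUE);
	if (ret != 0)
		return ret;

	/* Read the limit back, and restore the kernel default when done. */
	latency = rte_power_qos_get_cpu_resume_latency(1);
	ret = rte_power_qos_set_cpu_resume_latency(1,
			RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT);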

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 doc/guides/prog_guide/power_man.rst    |  19 ++++
 doc/guides/rel_notes/release_24_11.rst |   5 +
 lib/power/meson.build                  |   2 +
 lib/power/rte_power_qos.c              | 123 +++++++++++++++++++++++++
 lib/power/rte_power_qos.h              |  73 +++++++++++++++
 lib/power/version.map                  |   4 +
 6 files changed, 226 insertions(+)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
index f6674efe2d..91358b04f3 100644
--- a/doc/guides/prog_guide/power_man.rst
+++ b/doc/guides/prog_guide/power_man.rst
@@ -107,6 +107,25 @@ User Cases
 The power management mechanism is used to save power when performing L3 forwarding.
 
 
+PM QoS
+------
+
+The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
+interface is used to set and get the resume latency limit on cpuX from
+userspace. Each cpuidle governor in Linux selects which idle state to enter
+based on this CPU resume latency in its idle task.
+
+The deeper the idle state, the lower the power consumption, but the longer
+the resume time. Some services are latency-sensitive and expect a low
+resume time, such as interrupt packet receiving mode.
+
+Applications can set and get the CPU resume latency with
+``rte_power_qos_set_cpu_resume_latency()`` and ``rte_power_qos_get_cpu_resume_latency()``
+respectively. Applications can set a strict resume latency (zero value) with
+``rte_power_qos_set_cpu_resume_latency()`` to lower the resume latency and
+get better performance (though platform power consumption may increase).
+
+
 Ethernet PMD Power Management API
 ---------------------------------
 
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index fa4822d928..d9e268274b 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -237,6 +237,11 @@ New Features
   This field is used to pass an extra configuration settings such as ability
   to lookup IPv4 addresses in network byte order.
 
+* **Introduce per-CPU PM QoS interface.**
+
+  * Add per-CPU PM QoS interface to lower the resume latency when waking up
+    from idle state.
+
 * **Added new API to register telemetry endpoint callbacks with private arguments.**
 
   A new ``rte_telemetry_register_cmd_arg`` function is available to pass an opaque value to
diff --git a/lib/power/meson.build b/lib/power/meson.build
index 2f0f3d26e9..9b5d3e8315 100644
--- a/lib/power/meson.build
+++ b/lib/power/meson.build
@@ -23,12 +23,14 @@ sources = files(
         'rte_power.c',
         'rte_power_uncore.c',
         'rte_power_pmd_mgmt.c',
+	'rte_power_qos.c',
 )
 headers = files(
         'rte_power.h',
         'rte_power_guest_channel.h',
         'rte_power_pmd_mgmt.h',
         'rte_power_uncore.h',
+	'rte_power_qos.h',
 )
 
 deps += ['timer', 'ethdev']
diff --git a/lib/power/rte_power_qos.c b/lib/power/rte_power_qos.c
new file mode 100644
index 0000000000..09692b2161
--- /dev/null
+++ b/lib/power/rte_power_qos.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_lcore.h>
+#include <rte_log.h>
+
+#include "power_common.h"
+#include "rte_power_qos.h"
+
+#define PM_QOS_SYSFILE_RESUME_LATENCY_US	\
+	"/sys/devices/system/cpu/cpu%u/power/pm_qos_resume_latency_us"
+
+#define PM_QOS_CPU_RESUME_LATENCY_BUF_LEN	32
+
+int
+rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	if (latency < 0) {
+		POWER_LOG(ERR, "latency should be greater than or equal to 0");
+		return -EINVAL;
+	}
+
+	ret = open_core_sysfs_file(&f, "w", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us in the
+	 * kernel (see @PM_QOS_SYSFILE_RESUME_LATENCY_US), the meaning of
+	 * the different input strings is as follows:
+	 * 1> the resume latency is 0 if the input is "n/a".
+	 * 2> the resume latency has no constraint if the input is "0".
+	 * 3> otherwise, the resume latency is the actual value to be set.
+	 */
+	if (latency == 0)
+		snprintf(buf, sizeof(buf), "%s", "n/a");
+	else if (latency == RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+		snprintf(buf, sizeof(buf), "%u", 0);
+	else
+		snprintf(buf, sizeof(buf), "%u", latency);
+
+	ret = write_core_sysfs_s(f, buf);
+	if (ret != 0)
+		POWER_LOG(ERR, "Failed to write "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+
+	fclose(f);
+
+	return ret;
+}
+
+int
+rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	int latency = -1;
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	ret = open_core_sysfs_file(&f, "r", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	ret = read_core_sysfs_s(f, buf, sizeof(buf));
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to read "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		goto out;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us in the
+	 * kernel (see @PM_QOS_SYSFILE_RESUME_LATENCY_US), the meaning of
+	 * the different output strings is as follows:
+	 * 1> the resume latency is 0 if the output is "n/a".
+	 * 2> the resume latency has no constraint if the output is "0".
+	 * 3> otherwise, the resume latency is the actual value in use.
+	 */
+	if (strcmp(buf, "n/a") == 0)
+		latency = 0;
+	else {
+		latency = strtoul(buf, NULL, 10);
+		latency = latency == 0 ? RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT : latency;
+	}
+
+out:
+	fclose(f);
+
+	return latency != -1 ? latency : ret;
+}
diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h
new file mode 100644
index 0000000000..990c488373
--- /dev/null
+++ b/lib/power/rte_power_qos.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#ifndef RTE_POWER_QOS_H
+#define RTE_POWER_QOS_H
+
+#include <stdint.h>
+
+#include <rte_compat.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file rte_power_qos.h
+ *
+ * PM QoS API.
+ *
+ * The CPU-wide resume latency limit has a positive impact on this CPU's idle
+ * state selection in each cpuidle governor.
+ * Please see the PM QoS on CPU wide in the following link:
+ * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
+ *
+ * The deeper the idle state, the lower the power consumption, but the
+ * longer the resume time. Some services are delay-sensitive and expect a
+ * low resume time, such as interrupt packet receiving mode.
+ *
+ * In such cases, the per-CPU PM QoS API can be used to control this CPU's
+ * idle state selection, limiting it to the shallowest idle state to lower
+ * the delay after sleep, by setting a strict resume latency (zero value).
+ */
+
+#define RTE_POWER_QOS_STRICT_LATENCY_VALUE             0
+#define RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT    ((int)(UINT32_MAX >> 1))
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param lcore_id
+ *   target logical core id
+ *
+ * @param latency
+ *   The latency should be greater than or equal to zero, in microseconds.
+ *
+ * @return
+ *   0 on success. Otherwise, a negative value is returned.
+ */
+__rte_experimental
+int rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the current resume latency of this logical core.
+ * The default value in the kernel is @see RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT
+ * if it has not been set.
+ *
+ * @return
+ *   Negative value on failure.
+ *   >= 0 means the actual resume latency limit on this core.
+ */
+__rte_experimental
+int rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_POWER_QOS_H */
diff --git a/lib/power/version.map b/lib/power/version.map
index c9a226614e..08f178a39d 100644
--- a/lib/power/version.map
+++ b/lib/power/version.map
@@ -51,4 +51,8 @@ EXPERIMENTAL {
 	rte_power_set_uncore_env;
 	rte_power_uncore_freqs;
 	rte_power_unset_uncore_env;
+
+	# added in 24.11
+	rte_power_qos_get_cpu_resume_latency;
+	rte_power_qos_set_cpu_resume_latency;
 };
-- 
2.22.0


^ permalink raw reply	[relevance 5%]

* [PATCH v11 0/2] power: introduce PM QoS interface
    @ 2024-10-21 11:42  4% ` Huisong Li
  2024-10-21 11:42  5%   ` [PATCH v11 1/2] power: introduce PM QoS API on CPU wide Huisong Li
  2024-10-23  4:09  4% ` [PATCH v12 0/3] power: introduce PM QoS interface Huisong Li
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 169+ results
From: Huisong Li @ 2024-10-21 11:42 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay-sensitive and expect a low
resume time, such as interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used to set and get the resume latency limit on the cpuX for
userspace. Please see the description in kernel document[1].
Each cpuidle governor in Linux select which idle state to enter based on
this CPU resume latency in their idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection, limiting it to the shallowest idle state to lower the delay when
waking up from idle state, by setting a strict resume latency (zero value).

[1] https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us

---
 v11:
 - operate on the cpu id that the lcore is mapped to, using the new function
    power_get_lcore_mapped_cpu_id().
 v10:
  - replace LINE_MAX with a custom macro and fix two typos.
 v9:
  - move new feature description from release_24_07.rst to release_24_11.rst.
 v8:
  - update the latest code to resolve CI warning
 v7:
 - remove dead code (rte_lcore_is_enabled) in patch [2/2]
 v6:
  - update release_24_07.rst based on dpdk repo to resolve CI warning.
 v5:
  - use LINE_MAX to replace BUFSIZ, and use snprintf to replace sprintf.
 v4:
 - fix some comments based on Stephen
  - add stdint.h include
  - add Acked-by Morten Brørup <mb@smartsharesystems.com>
 v3:
  - add RTE_POWER_xxx prefix for some macro in header
  - add the check for lcore_id with rte_lcore_is_enabled
 v2:
  - use PM QoS on CPU wide to replace the one on system wide

Huisong Li (2):
  power: introduce PM QoS API on CPU wide
  examples/l3fwd-power: add PM QoS configuration

 doc/guides/prog_guide/power_man.rst    |  19 ++++
 doc/guides/rel_notes/release_24_11.rst |   5 +
 examples/l3fwd-power/main.c            |  24 +++++
 lib/power/meson.build                  |   2 +
 lib/power/rte_power_qos.c              | 123 +++++++++++++++++++++++++
 lib/power/rte_power_qos.h              |  73 +++++++++++++++
 lib/power/version.map                  |   4 +
 7 files changed, 250 insertions(+)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

-- 
2.22.0


^ permalink raw reply	[relevance 4%]

* Re: [PATCH v2 2/4] power: refactor uncore power management library
  @ 2024-10-22  2:05  0%         ` lihuisong (C)
  0 siblings, 0 replies; 169+ results
From: lihuisong (C) @ 2024-10-22  2:05 UTC (permalink / raw)
  To: Tummala, Sivaprasad
  Cc: dev, david.hunt, anatoly.burakov, radu.nicolau, jerinj,
	cristian.dumitrescu, konstantin.ananyev, Yigit, Ferruh, gakhil

Hi Sivaprasad,

I have an inline question; please take a look.

On 2024/10/8 14:19, Tummala, Sivaprasad wrote:
> Hi Lihuisong,
>
>> -----Original Message-----
>> From: lihuisong (C) <lihuisong@huawei.com>
>> Sent: Tuesday, August 27, 2024 6:33 PM
>> To: Tummala, Sivaprasad <Sivaprasad.Tummala@amd.com>
>> Cc: dev@dpdk.org; david.hunt@intel.com; anatoly.burakov@intel.com;
>> radu.nicolau@intel.com; jerinj@marvell.com; cristian.dumitrescu@intel.com;
>> konstantin.ananyev@huawei.com; Yigit, Ferruh <Ferruh.Yigit@amd.com>;
>> gakhil@marvell.com
>> Subject: Re: [PATCH v2 2/4] power: refactor uncore power management library
>>
>> Hi Sivaprasad,
>>
>> Suggest splitting this patch into two patches to make it easier to review:
>> patch-1: abstract a file for the uncore DVFS core level, namely, the
>> rte_power_uncore_ops.c you did.
>> patch-2: move and rename, lib/power/power_intel_uncore.c =>
>> drivers/power/intel_uncore/intel_uncore.c
>>
>> patch [1/4] is also too big and not easy to review.
>>
>> In addition, I have some questions and am not sure if we can adjust the uncore init
>> process.
>>
>> /Huisong
>>
>>
>> On 2024/8/26 21:06, Sivaprasad Tummala wrote:
>>> This patch refactors the power management library, addressing uncore
>>> power management. The primary changes involve the creation of
>>> dedicated directories for each driver within 'drivers/power/uncore/*'.
>>> The adjustment of meson.build files enables the selective activation
>>> of individual drivers.
>>>
>>> This refactor significantly improves code organization, enhances
>>> clarity and boosts maintainability. It lays the foundation for more
>>> focused development on individual drivers and facilitates seamless
>>> integration of future enhancements, particularly the AMD uncore driver.
>>>
>>> Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
>>> ---
>>>    .../power/intel_uncore/intel_uncore.c         |  18 +-
>>>    .../power/intel_uncore/intel_uncore.h         |   8 +-
>>>    drivers/power/intel_uncore/meson.build        |   6 +
>>>    drivers/power/meson.build                     |   3 +-
>>>    lib/power/meson.build                         |   2 +-
>>>    lib/power/rte_power_uncore.c                  | 205 ++++++---------
>>>    lib/power/rte_power_uncore.h                  |  87 ++++---
>>>    lib/power/rte_power_uncore_ops.h              | 239 ++++++++++++++++++
>>>    lib/power/version.map                         |   1 +
>>>    9 files changed, 405 insertions(+), 164 deletions(-)
>>>    rename lib/power/power_intel_uncore.c =>
>> drivers/power/intel_uncore/intel_uncore.c (95%)
>>>    rename lib/power/power_intel_uncore.h =>
>> drivers/power/intel_uncore/intel_uncore.h (97%)
>>>    create mode 100644 drivers/power/intel_uncore/meson.build
>>>    create mode 100644 lib/power/rte_power_uncore_ops.h
>>>
>>> diff --git a/lib/power/power_intel_uncore.c
>>> b/drivers/power/intel_uncore/intel_uncore.c
>>> similarity index 95%
>>> rename from lib/power/power_intel_uncore.c rename to
>>> drivers/power/intel_uncore/intel_uncore.c
>>> index 4eb9c5900a..804ad5d755 100644
>>> --- a/lib/power/power_intel_uncore.c
>>> +++ b/drivers/power/intel_uncore/intel_uncore.c
>>> @@ -8,7 +8,7 @@
>>>
>>>    #include <rte_memcpy.h>
>>>
>>> -#include "power_intel_uncore.h"
>>> +#include "intel_uncore.h"
>>>    #include "power_common.h"
>>>
>>>    #define MAX_NUMA_DIE 8
>>> @@ -475,3 +475,19 @@ power_intel_uncore_get_num_dies(unsigned int pkg)
>>>
>>>        return count;
>>>    }
>> <...>
>>> -#endif /* POWER_INTEL_UNCORE_H */
>>> +#endif /* INTEL_UNCORE_H */
>>> diff --git a/drivers/power/intel_uncore/meson.build
>>> b/drivers/power/intel_uncore/meson.build
>>> new file mode 100644
>>> index 0000000000..876df8ad14
>>> --- /dev/null
>>> +++ b/drivers/power/intel_uncore/meson.build
>>> @@ -0,0 +1,6 @@
>>> +# SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel
>>> +Corporation # Copyright(c) 2024 Advanced Micro Devices, Inc.
>>> +
>>> +sources = files('intel_uncore.c')
>>> +deps += ['power']
>>> diff --git a/drivers/power/meson.build b/drivers/power/meson.build
>>> index 8c7215c639..c83047af94 100644
>>> --- a/drivers/power/meson.build
>>> +++ b/drivers/power/meson.build
>>> @@ -6,7 +6,8 @@ drivers = [
>>>            'amd_pstate',
>>>            'cppc',
>>>            'kvm_vm',
>>> -        'pstate'
>>> +        'pstate',
>>> +        'intel_uncore'
>> The cppc, amd_pstate and so on belong to the cpufreq scope.
>> And intel_uncore belongs to the uncore DVFS scope.
>> They are not at the same level. So I propose that we need to create one directory
>> called something like cpufreq or core.
>> This 'intel_uncore' name doesn't seem appropriate. What do you think of the following
>> directory structure:
>> drivers/power/uncore/intel_uncore.c
>> drivers/power/uncore/amd_uncore.c (according to the patch[4/4]).
> At present, Meson does not support detecting an additional level of subdirectories within drivers/*.
> All the drivers maintain a consistent subdirectory structure.
>>>    ]
>>>    std_deps = ['power']
>>> diff --git a/lib/power/meson.build b/lib/power/meson.build index
>>> f3e3451cdc..9b13d98810 100644
>>> --- a/lib/power/meson.build
>>> +++ b/lib/power/meson.build
>>> @@ -13,7 +13,6 @@ if not is_linux
>>>    endif
>>>    sources = files(
>>>            'power_common.c',
>>> -        'power_intel_uncore.c',
>>>            'rte_power.c',
>>>            'rte_power_uncore.c',
>>>            'rte_power_pmd_mgmt.c',
>>> @@ -24,6 +23,7 @@ headers = files(
>>>            'rte_power_guest_channel.h',
>>>            'rte_power_pmd_mgmt.h',
>>>            'rte_power_uncore.h',
>>> +        'rte_power_uncore_ops.h',
>>>    )
>>>    if cc.has_argument('-Wno-cast-qual')
>>>        cflags += '-Wno-cast-qual'
>>> diff --git a/lib/power/rte_power_uncore.c
>>> b/lib/power/rte_power_uncore.c index 48c75a5da0..9f8771224f 100644
>>> --- a/lib/power/rte_power_uncore.c
>>> +++ b/lib/power/rte_power_uncore.c
>>> @@ -1,6 +1,7 @@
>>>    /* SPDX-License-Identifier: BSD-3-Clause
>>>     * Copyright(c) 2010-2014 Intel Corporation
>>>     * Copyright(c) 2023 AMD Corporation
>>> + * Copyright(c) 2024 Advanced Micro Devices, Inc.
>>>     */
>>>
>>>    #include <errno.h>
>>> @@ -12,98 +13,50 @@
>>>    #include "rte_power_uncore.h"
>>>    #include "power_intel_uncore.h"
>>>
>>> -enum rte_uncore_power_mgmt_env default_uncore_env =
>>> RTE_UNCORE_PM_ENV_NOT_SET;
>>> +static enum rte_uncore_power_mgmt_env global_uncore_env =
>>> +RTE_UNCORE_PM_ENV_NOT_SET; static struct rte_power_uncore_ops
>>> +*global_uncore_ops;
>>>
>>>    static rte_spinlock_t global_env_cfg_lock =
>>> RTE_SPINLOCK_INITIALIZER;
>>> +static RTE_TAILQ_HEAD(, rte_power_uncore_ops) uncore_ops_list =
>>> +                     TAILQ_HEAD_INITIALIZER(uncore_ops_list);
>>>
>>> -static uint32_t
>>> -power_get_dummy_uncore_freq(unsigned int pkg __rte_unused,
>>> -            unsigned int die __rte_unused)
>>> -{
>>> -     return 0;
>>> -}
>>> -
>>> -static int
>>> -power_set_dummy_uncore_freq(unsigned int pkg __rte_unused,
>>> -            unsigned int die __rte_unused, uint32_t index __rte_unused)
>>> -{
>>> -     return 0;
>>> -}
>>> +const char *uncore_env_str[] = {
>>> +     "not set",
>>> +     "auto-detect",
>>> +     "intel-uncore",
>>> +     "amd-hsmp"
>>> +};
>> Why open the "auto-detect" mode to the user?
>> Why not set this automatically at framework initialization?
>> After all, the uncore driver is fixed for one platform.
> The auto-detection feature has been implemented to enable seamless migration across platforms
> without requiring any changes to the application.
>>> -static int
>>> -power_dummy_uncore_freq_max(unsigned int pkg __rte_unused,
>>> -            unsigned int die __rte_unused)
>>> -{
>>> -     return 0;
>>> -}
>>> -
>> <...>
>>> -static int
>>> -power_dummy_uncore_get_num_freqs(unsigned int pkg __rte_unused,
>>> -            unsigned int die __rte_unused)
>>> +/* register the ops struct in rte_power_uncore_ops, return 0 on
>>> +success. */ int rte_power_register_uncore_ops(struct
>>> +rte_power_uncore_ops *driver_ops)
>>>    {
>>> -     return 0;
>>> -}
>>> +     if (!driver_ops->init || !driver_ops->exit || !driver_ops->get_num_pkgs ||
>>> +             !driver_ops->get_num_dies || !driver_ops->get_num_freqs ||
>>> +             !driver_ops->get_avail_freqs || !driver_ops->get_freq ||
>>> +             !driver_ops->set_freq || !driver_ops->freq_max ||
>>> +             !driver_ops->freq_min) {
>>> +             POWER_LOG(ERR, "Missing callbacks while registering power ops");
>>> +             return -1;
>>> +     }
>>> +     if (driver_ops->cb)
>>> +             driver_ops->cb();
>>>
>>> -static unsigned int
>>> -power_dummy_uncore_get_num_pkgs(void)
>>> -{
>>> -     return 0;
>>> -}
>>> +     TAILQ_INSERT_TAIL(&uncore_ops_list, driver_ops, next);
>>>
>>> -static unsigned int
>>> -power_dummy_uncore_get_num_dies(unsigned int pkg __rte_unused) -{
>>>        return 0;
>>>    }
>>> -
>>> -/* function pointers */
>>> -rte_power_get_uncore_freq_t rte_power_get_uncore_freq =
>>> power_get_dummy_uncore_freq; -rte_power_set_uncore_freq_t
>>> rte_power_set_uncore_freq = power_set_dummy_uncore_freq;
>>> -rte_power_uncore_freq_change_t rte_power_uncore_freq_max =
>>> power_dummy_uncore_freq_max; -rte_power_uncore_freq_change_t
>>> rte_power_uncore_freq_min = power_dummy_uncore_freq_min;
>>> -rte_power_uncore_freqs_t rte_power_uncore_freqs =
>>> power_dummy_uncore_freqs; -rte_power_uncore_get_num_freqs_t
>>> rte_power_uncore_get_num_freqs = power_dummy_uncore_get_num_freqs;
>>> -rte_power_uncore_get_num_pkgs_t rte_power_uncore_get_num_pkgs =
>>> power_dummy_uncore_get_num_pkgs; -rte_power_uncore_get_num_dies_t
>>> rte_power_uncore_get_num_dies = power_dummy_uncore_get_num_dies;
>>> -
>>> -static void
>>> -reset_power_uncore_function_ptrs(void)
>>> -{
>>> -     rte_power_get_uncore_freq = power_get_dummy_uncore_freq;
>>> -     rte_power_set_uncore_freq = power_set_dummy_uncore_freq;
>>> -     rte_power_uncore_freq_max = power_dummy_uncore_freq_max;
>>> -     rte_power_uncore_freq_min = power_dummy_uncore_freq_min;
>>> -     rte_power_uncore_freqs  = power_dummy_uncore_freqs;
>>> -     rte_power_uncore_get_num_freqs = power_dummy_uncore_get_num_freqs;
>>> -     rte_power_uncore_get_num_pkgs = power_dummy_uncore_get_num_pkgs;
>>> -     rte_power_uncore_get_num_dies = power_dummy_uncore_get_num_dies;
>>> -}
>>> -
>>>    int
>>>    rte_power_set_uncore_env(enum rte_uncore_power_mgmt_env env)
>>>    {
>>> -     int ret;
>>> +     int ret = -1;
>>> +     struct rte_power_uncore_ops *ops;
>>>
>>>        rte_spinlock_lock(&global_env_cfg_lock);
>>>
>>> -     if (default_uncore_env != RTE_UNCORE_PM_ENV_NOT_SET) {
>>> +     if (global_uncore_env != RTE_UNCORE_PM_ENV_NOT_SET) {
>>>                POWER_LOG(ERR, "Uncore Power Management Env already set.");
>>> -             rte_spinlock_unlock(&global_env_cfg_lock);
>>> -             return -1;
>>> +             goto out;
>>>        }
>>>
>> <...>
>>> +     if (env <= RTE_DIM(uncore_env_str)) {
>>> +             RTE_TAILQ_FOREACH(ops, &uncore_ops_list, next)
>>> +                     if (strncmp(ops->name, uncore_env_str[env],
>>> +                             RTE_POWER_UNCORE_DRIVER_NAMESZ) == 0) {
>>> +                             global_uncore_env = env;
>>> +                             global_uncore_ops = ops;
>>> +                             ret = 0;
>>> +                             goto out;
>>> +                     }
>>> +             POWER_LOG(ERR, "Power Management (%s) not supported",
>>> +                             uncore_env_str[env]);
>>> +     } else
>>> +             POWER_LOG(ERR, "Invalid Power Management Environment");
>>>
>>> -     default_uncore_env = env;
>>>    out:
>>>        rte_spinlock_unlock(&global_env_cfg_lock);
>>>        return ret;
>>> @@ -139,15 +89,22 @@ void
>>>    rte_power_unset_uncore_env(void)
>>>    {
>>>        rte_spinlock_lock(&global_env_cfg_lock);
>>> -     default_uncore_env = RTE_UNCORE_PM_ENV_NOT_SET;
>>> -     reset_power_uncore_function_ptrs();
>>> +     global_uncore_env = RTE_UNCORE_PM_ENV_NOT_SET;
>>>        rte_spinlock_unlock(&global_env_cfg_lock);
>>>    }
>>>
>> How about abstracting an interface to initialize or set the uncore driver on the platform
>> automatically?
>>
>> And later do power_intel_uncore_init_on_die() for each die on different package.
>>>    enum rte_uncore_power_mgmt_env
>>>    rte_power_get_uncore_env(void)
>>>    {
>>> -     return default_uncore_env;
>>> +     return global_uncore_env;
>>> +}
>>> +
>>> +struct rte_power_uncore_ops *
>>> +rte_power_get_uncore_ops(void)
>>> +{
>>> +     RTE_ASSERT(global_uncore_ops != NULL);
>>> +
>>> +     return global_uncore_ops;
>>>    }
>>>
>>>    int
>>> @@ -155,27 +112,29 @@ rte_power_uncore_init(unsigned int pkg, unsigned
>>> int die)
>> This pkg means the socket id on the platform, right?
>> If so, I am not sure that the
>> uncore_info[RTE_MAX_NUMA_NODES][MAX_NUMA_DIE] used in the uncore lib is
>> universal for all uncore drivers.
>> For example, an uncore driver may only support uncore DVFS at the socket
>> level. What should we do about this? We may need to think twice.
> Yes, pkg represents a socket id. In platforms with a single uncore controller per socket,
> the die ID should be set to '0' for the corresponding socket ID (pkg).
So we just use die ID 0 on each socket ID (namely, uncore_info[0][0],
uncore_info[1][0]) to initialize the uncore power info on the sockets, right?
From the implementation in l3fwd-power, it initializes all die IDs on all
sockets. For a platform with a single uncore controller per socket, its
uncore driver in DPDK has to ignore every die ID other than die-0 on each
socket, right? (A minimal sketch of the per-socket initialization I mean is
at the end of this message, after the quoted diff.)
>>>    {
>>>        int ret = -1;
>>>
>> <...>

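For the single-uncore-controller-per-socket case above, the per-socket
initialization I have in mind is roughly the following sketch; the helper
name and the way the socket count is obtained are illustrative assumptions,
not part of this patch:

	#include <rte_power_uncore.h>

	/* Initialize uncore power info for die 0 only, once per socket. */
	static int
	uncore_init_die0_per_socket(unsigned int num_sockets)
	{
		unsigned int pkg;

		for (pkg = 0; pkg < num_sockets; pkg++) {
			/* A driver with a single uncore controller per
			 * socket would accept only die 0 here. */
			if (rte_power_uncore_init(pkg, 0) < 0)
				return -1;
		}
		return 0;
	}
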
^ permalink raw reply	[relevance 0%]

* Re: [PATCH v7 1/5] power: refactor core power management library
  @ 2024-10-22  3:03  3%     ` lihuisong (C)
  2024-10-22  7:13  0%       ` Tummala, Sivaprasad
  0 siblings, 1 reply; 169+ results
From: lihuisong (C) @ 2024-10-22  3:03 UTC (permalink / raw)
  To: Sivaprasad Tummala, david.hunt, konstantin.ananyev
  Cc: dev, anatoly.burakov, jerinj, radu.nicolau, gakhil,
	cristian.dumitrescu, ferruh.yigit

Hi Sivaprasad,

Some comments inline.

On 2024/10/21 12:07, Sivaprasad Tummala wrote:
> This patch introduces a comprehensive refactor to the core power
> management library. The primary focus is on improving modularity
> and organization by relocating specific driver implementations
> from the 'lib/power' directory to dedicated directories within
> 'drivers/power/core/*'. The adjustment of meson.build files
> enables the selective activation of individual drivers.
>
> These changes contribute to a significant enhancement in code
> organization, providing a clearer structure for driver implementations.
> The refactor aims to improve overall code clarity and boost
> maintainability. Additionally, it establishes a foundation for
> future development, allowing for more focused work on individual
> drivers and seamless integration of forthcoming enhancements.
>
> v6:
>   - fixed compilation error with symbol export in API
>   - exported power_get_lcore_mapped_cpu_id as internal API to be
>     used in drivers/power/*
>
> v5:
>   - fixed code style warning
>
> v4:
>   - fixed build error with RTE_ASSERT
>
> v3:
>   - renamed rte_power_core_ops.h as rte_power_cpufreq_api.h
>   - re-worked on auto detection logic
>
> v2:
>   - added NULL check for global_core_ops in rte_power_get_core_ops
>
> Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> ---
>   drivers/meson.build                           |   1 +
>   .../power/acpi/acpi_cpufreq.c                 |  22 +-
>   .../power/acpi/acpi_cpufreq.h                 |   6 +-
>   drivers/power/acpi/meson.build                |  10 +
>   .../power/amd_pstate/amd_pstate_cpufreq.c     |  24 +-
>   .../power/amd_pstate/amd_pstate_cpufreq.h     |  10 +-
>   drivers/power/amd_pstate/meson.build          |  10 +
>   .../power/cppc/cppc_cpufreq.c                 |  22 +-
>   .../power/cppc/cppc_cpufreq.h                 |   8 +-
>   drivers/power/cppc/meson.build                |  10 +
>   .../power/kvm_vm}/guest_channel.c             |   0
>   .../power/kvm_vm}/guest_channel.h             |   0
>   .../power/kvm_vm/kvm_vm.c                     |  22 +-
>   .../power/kvm_vm/kvm_vm.h                     |   6 +-
>   drivers/power/kvm_vm/meson.build              |  14 +
>   drivers/power/meson.build                     |  12 +
>   drivers/power/pstate/meson.build              |  10 +
>   .../power/pstate/pstate_cpufreq.c             |  22 +-
>   .../power/pstate/pstate_cpufreq.h             |   6 +-
>   lib/power/meson.build                         |   7 +-
>   lib/power/power_common.c                      |   2 +-
>   lib/power/power_common.h                      |  18 +-
>   lib/power/rte_power.c                         | 355 ++++++++----------
>   lib/power/rte_power.h                         | 116 +++---
>   lib/power/rte_power_cpufreq_api.h             | 206 ++++++++++
>   lib/power/version.map                         |  15 +
>   26 files changed, 665 insertions(+), 269 deletions(-)
>   rename lib/power/power_acpi_cpufreq.c => drivers/power/acpi/acpi_cpufreq.c (95%)
>   rename lib/power/power_acpi_cpufreq.h => drivers/power/acpi/acpi_cpufreq.h (98%)
>   create mode 100644 drivers/power/acpi/meson.build
>   rename lib/power/power_amd_pstate_cpufreq.c => drivers/power/amd_pstate/amd_pstate_cpufreq.c (95%)
>   rename lib/power/power_amd_pstate_cpufreq.h => drivers/power/amd_pstate/amd_pstate_cpufreq.h (96%)
>   create mode 100644 drivers/power/amd_pstate/meson.build
>   rename lib/power/power_cppc_cpufreq.c => drivers/power/cppc/cppc_cpufreq.c (95%)
>   rename lib/power/power_cppc_cpufreq.h => drivers/power/cppc/cppc_cpufreq.h (97%)
>   create mode 100644 drivers/power/cppc/meson.build
>   rename {lib/power => drivers/power/kvm_vm}/guest_channel.c (100%)
>   rename {lib/power => drivers/power/kvm_vm}/guest_channel.h (100%)
>   rename lib/power/power_kvm_vm.c => drivers/power/kvm_vm/kvm_vm.c (82%)
>   rename lib/power/power_kvm_vm.h => drivers/power/kvm_vm/kvm_vm.h (98%)
>   create mode 100644 drivers/power/kvm_vm/meson.build
>   create mode 100644 drivers/power/meson.build
>   create mode 100644 drivers/power/pstate/meson.build
>   rename lib/power/power_pstate_cpufreq.c => drivers/power/pstate/pstate_cpufreq.c (96%)
>   rename lib/power/power_pstate_cpufreq.h => drivers/power/pstate/pstate_cpufreq.h (98%)
>   create mode 100644 lib/power/rte_power_cpufreq_api.h
>
> diff --git a/drivers/meson.build b/drivers/meson.build
> index 2733306698..7ef4f581a0 100644
> --- a/drivers/meson.build
> +++ b/drivers/meson.build
> @@ -29,6 +29,7 @@ subdirs = [
>           'event',          # depends on common, bus, mempool and net.
>           'baseband',       # depends on common and bus.
>           'gpu',            # depends on common and bus.
> +        'power',          # depends on common (in future).
>   ]
>   
>   if meson.is_cross_build()
> diff --git a/lib/power/power_acpi_cpufreq.c b/drivers/power/acpi/acpi_cpufreq.c
> similarity index 95%
> rename from lib/power/power_acpi_cpufreq.c
> rename to drivers/power/acpi/acpi_cpufreq.c
> index ae809fbb60..974fbb7ba8 100644
> --- a/lib/power/power_acpi_cpufreq.c
> +++ b/drivers/power/acpi/acpi_cpufreq.c
> @@ -10,7 +10,7 @@
>   #include <rte_stdatomic.h>
>   #include <rte_string_fns.h>
>   
> -#include "power_acpi_cpufreq.h"
> +#include "acpi_cpufreq.h"
>   #include "power_common.h"
>   
<...>
> diff --git a/lib/power/power_common.c b/lib/power/power_common.c
> index b47c63a5f1..e482f71c64 100644
> --- a/lib/power/power_common.c
> +++ b/lib/power/power_common.c
> @@ -13,7 +13,7 @@
>   
>   #include "power_common.h"
>   
> -RTE_LOG_REGISTER_DEFAULT(power_logtype, INFO);
> +RTE_LOG_REGISTER_DEFAULT(rte_power_logtype, INFO);
>   
>   #define POWER_SYSFILE_SCALING_DRIVER   \
>   		"/sys/devices/system/cpu/cpu%u/cpufreq/scaling_driver"
> diff --git a/lib/power/power_common.h b/lib/power/power_common.h
> index 82fb94d0c0..c294f561bb 100644
> --- a/lib/power/power_common.h
> +++ b/lib/power/power_common.h
> @@ -6,12 +6,13 @@
>   #define _POWER_COMMON_H_
>   
>   #include <rte_common.h>
> +#include <rte_compat.h>
>   #include <rte_log.h>
>   
>   #define RTE_POWER_INVALID_FREQ_INDEX (~0)
>   
> -extern int power_logtype;
> -#define RTE_LOGTYPE_POWER power_logtype
> +extern int rte_power_logtype;
> +#define RTE_LOGTYPE_POWER rte_power_logtype
>   #define POWER_LOG(level, ...) \
>   	RTE_LOG_LINE(level, POWER, "" __VA_ARGS__)
>   
> @@ -23,14 +24,27 @@ extern int power_logtype;
>   #endif
>   
>   /* check if scaling driver matches one we want */
> +__rte_internal
>   int cpufreq_check_scaling_driver(const char *driver);
> +
> +__rte_internal
>   int power_set_governor(unsigned int lcore_id, const char *new_governor,
>   		char *orig_governor, size_t orig_governor_len);
cpufreq_check_scaling_driver and power_set_governor are only used for
cpufreq, so they shouldn't be put in this common header file.
We came to an agreement on this in patch V2 1/4.
I guess you forgot it😁
I suggest moving these two APIs to rte_power_cpufreq_api.h.
> +
> +__rte_internal
>   int open_core_sysfs_file(FILE **f, const char *mode, const char *format, ...)
>   		__rte_format_printf(3, 4);
> +
> +__rte_internal
>   int read_core_sysfs_u32(FILE *f, uint32_t *val);
> +
> +__rte_internal
>   int read_core_sysfs_s(FILE *f, char *buf, unsigned int len);
> +
> +__rte_internal
>   int write_core_sysfs_s(FILE *f, const char *str);
> +
> +__rte_internal
>   int power_get_lcore_mapped_cpu_id(uint32_t lcore_id, uint32_t *cpu_id);
>   
>   #endif /* _POWER_COMMON_H_ */
> diff --git a/lib/power/rte_power.c b/lib/power/rte_power.c
> index 36c3f3da98..416f0148a3 100644
> --- a/lib/power/rte_power.c
> +++ b/lib/power/rte_power.c
> @@ -6,155 +6,88 @@
>   
>   #include <rte_errno.h>
>   #include <rte_spinlock.h>
> +#include <rte_debug.h>
>   
>   #include "rte_power.h"
> -#include "power_acpi_cpufreq.h"
> -#include "power_cppc_cpufreq.h"
>   #include "power_common.h"
> -#include "power_kvm_vm.h"
> -#include "power_pstate_cpufreq.h"
> -#include "power_amd_pstate_cpufreq.h"
>   
> -enum power_management_env global_default_env = PM_ENV_NOT_SET;
> +static enum power_management_env global_default_env = PM_ENV_NOT_SET;
> +static struct rte_power_core_ops *global_power_core_ops;
>   
>   static rte_spinlock_t global_env_cfg_lock = RTE_SPINLOCK_INITIALIZER;
> -
> -/* function pointers */
> -rte_power_freqs_t rte_power_freqs  = NULL;
> -rte_power_get_freq_t rte_power_get_freq = NULL;
> -rte_power_set_freq_t rte_power_set_freq = NULL;
> -rte_power_freq_change_t rte_power_freq_up = NULL;
> -rte_power_freq_change_t rte_power_freq_down = NULL;
> -rte_power_freq_change_t rte_power_freq_max = NULL;
> -rte_power_freq_change_t rte_power_freq_min = NULL;
> -rte_power_freq_change_t rte_power_turbo_status;
> -rte_power_freq_change_t rte_power_freq_enable_turbo;
> -rte_power_freq_change_t rte_power_freq_disable_turbo;
> -rte_power_get_capabilities_t rte_power_get_capabilities;
> -
> -static void
> -reset_power_function_ptrs(void)
> +static RTE_TAILQ_HEAD(, rte_power_core_ops) core_ops_list =
> +			TAILQ_HEAD_INITIALIZER(core_ops_list);
> +
> +const char *power_env_str[] = {
> +	"not set",
> +	"acpi",
> +	"kvm-vm",
> +	"pstate",
> +	"cppc",
> +	"amd-pstate"
> +};
> +

<...>
> +uint32_t
> +rte_power_freqs(unsigned int lcore_id, uint32_t *freqs, uint32_t n)
> +{
> +	RTE_ASSERT(global_power_core_ops != NULL);
> +	return global_power_core_ops->get_avail_freqs(lcore_id, freqs, n);
> +}
> +
> +uint32_t
> +rte_power_get_freq(unsigned int lcore_id)
> +{
> +	RTE_ASSERT(global_power_core_ops != NULL);
> +	return global_power_core_ops->get_freq(lcore_id);
> +}
> +
> +uint32_t
> +rte_power_set_freq(unsigned int lcore_id, uint32_t index)
> +{
> +	RTE_ASSERT(global_power_core_ops != NULL);
> +	return global_power_core_ops->set_freq(lcore_id, index);
> +}
> +
> +int
> +rte_power_freq_up(unsigned int lcore_id)
> +{
> +	RTE_ASSERT(global_power_core_ops != NULL);
> +	return global_power_core_ops->freq_up(lcore_id);
> +}
> +
> +int
> +rte_power_freq_down(unsigned int lcore_id)
> +{
> +	RTE_ASSERT(global_power_core_ops != NULL);
> +	return global_power_core_ops->freq_down(lcore_id);
> +}
> +
> +int
> +rte_power_freq_max(unsigned int lcore_id)
> +{
> +	RTE_ASSERT(global_power_core_ops != NULL);
> +	return global_power_core_ops->freq_max(lcore_id);
> +}
> +
> +int
> +rte_power_freq_min(unsigned int lcore_id)
> +{
> +	RTE_ASSERT(global_power_core_ops != NULL);
> +	return global_power_core_ops->freq_min(lcore_id);
> +}
>   
> +int
> +rte_power_turbo_status(unsigned int lcore_id)
> +{
> +	RTE_ASSERT(global_power_core_ops != NULL);
> +	return global_power_core_ops->turbo_status(lcore_id);
> +}
> +
> +int
> +rte_power_freq_enable_turbo(unsigned int lcore_id)
> +{
> +	RTE_ASSERT(global_power_core_ops != NULL);
> +	return global_power_core_ops->enable_turbo(lcore_id);
> +}
> +
> +int
> +rte_power_freq_disable_turbo(unsigned int lcore_id)
> +{
> +	RTE_ASSERT(global_power_core_ops != NULL);
> +	return global_power_core_ops->disable_turbo(lcore_id);
> +}
> +
> +int
> +rte_power_get_capabilities(unsigned int lcore_id,
> +		struct rte_power_core_capabilities *caps)
> +{
> +	RTE_ASSERT(global_power_core_ops != NULL);
> +	return global_power_core_ops->get_caps(lcore_id, caps);
>   }
> diff --git a/lib/power/rte_power.h b/lib/power/rte_power.h
> index 4fa4afe399..e9a72b92ad 100644
> --- a/lib/power/rte_power.h
> +++ b/lib/power/rte_power.h
> @@ -1,5 +1,6 @@
>   /* SPDX-License-Identifier: BSD-3-Clause
>    * Copyright(c) 2010-2014 Intel Corporation
> + * Copyright(c) 2024 Advanced Micro Devices, Inc.
>    */
>   
>   #ifndef _RTE_POWER_H
> @@ -14,14 +15,21 @@
>   #include <rte_log.h>
>   #include <rte_power_guest_channel.h>
>   
> +#include "rte_power_cpufreq_api.h"
From the names of rte_power.c and rte_power.h, they are supposed to work
for all power libraries, as I also proposed in a previous version.
But rte_power.* currently only works for the cpufreq lib. If we need
something that puts all power components together, we can create it then.
Now that rte_power_cpufreq_api.h has been created for the cpufreq library,
how about directly renaming rte_power.c to rte_power_cpufreq_api.c and
rte_power.h to rte_power_cpufreq_api.h?
There will be ABI changes, but that is allowed in this 24.11 release. If we
plan to do it later, we'll have to wait another year.
> +
>   #ifdef __cplusplus
>   extern "C" {
>   #endif
>   
>   /* Power Management Environment State */
> -enum power_management_env {PM_ENV_NOT_SET, PM_ENV_ACPI_CPUFREQ, PM_ENV_KVM_VM,
> -		PM_ENV_PSTATE_CPUFREQ, PM_ENV_CPPC_CPUFREQ,
> -		PM_ENV_AMD_PSTATE_CPUFREQ};
> +enum power_management_env {
> +	PM_ENV_NOT_SET = 0,
> +	PM_ENV_ACPI_CPUFREQ,
> +	PM_ENV_KVM_VM,
> +	PM_ENV_PSTATE_CPUFREQ,
> +	PM_ENV_CPPC_CPUFREQ,
> +	PM_ENV_AMD_PSTATE_CPUFREQ
> +};
>   
<...>

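For reference, the application-facing flow after this refactor stays the same;
each wrapper is now dispatched through the registered ops table
(global_power_core_ops). A minimal usage sketch, assuming rte_power_set_env()
and rte_power_init() keep their existing signatures (error handling trimmed):

	#include <rte_power.h>

	/* Pick a cpufreq driver, bind the lcore, then scale its frequency. */
	static int
	scale_lcore_to_max(unsigned int lcore_id)
	{
		if (rte_power_set_env(PM_ENV_ACPI_CPUFREQ) < 0)
			return -1;
		if (rte_power_init(lcore_id) < 0)
			return -1;
		return rte_power_freq_max(lcore_id);
	}
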
^ permalink raw reply	[relevance 3%]

* RE: [PATCH v7 1/5] power: refactor core power management library
  2024-10-22  3:03  3%     ` lihuisong (C)
@ 2024-10-22  7:13  0%       ` Tummala, Sivaprasad
  2024-10-22  8:36  0%         ` lihuisong (C)
  0 siblings, 1 reply; 169+ results
From: Tummala, Sivaprasad @ 2024-10-22  7:13 UTC (permalink / raw)
  To: lihuisong (C), david.hunt, konstantin.ananyev
  Cc: dev, anatoly.burakov, jerinj, radu.nicolau, gakhil,
	cristian.dumitrescu, Yigit, Ferruh

Hi Huisong,

Please find my comments inline.

> -----Original Message-----
> From: lihuisong (C) <lihuisong@huawei.com>
> Sent: Tuesday, October 22, 2024 8:33 AM
> To: Tummala, Sivaprasad <Sivaprasad.Tummala@amd.com>;
> david.hunt@intel.com; konstantin.ananyev@huawei.com
> Cc: dev@dpdk.org; anatoly.burakov@intel.com; jerinj@marvell.com;
> radu.nicolau@intel.com; gakhil@marvell.com; cristian.dumitrescu@intel.com; Yigit,
> Ferruh <Ferruh.Yigit@amd.com>
> Subject: Re: [PATCH v7 1/5] power: refactor core power management library
>
> Hi Sivaprasad,
>
> Some comments inline.
>
> On 2024/10/21 12:07, Sivaprasad Tummala wrote:
> > This patch introduces a comprehensive refactor to the core power
> > management library. The primary focus is on improving modularity and
> > organization by relocating specific driver implementations from the
> > 'lib/power' directory to dedicated directories within
> > 'drivers/power/core/*'. The adjustment of meson.build files enables
> > the selective activation of individual drivers.
> >
> > These changes contribute to a significant enhancement in code
> > organization, providing a clearer structure for driver implementations.
> > The refactor aims to improve overall code clarity and boost
> > maintainability. Additionally, it establishes a foundation for future
> > development, allowing for more focused work on individual drivers and
> > seamless integration of forthcoming enhancements.
> >
> > v6:
> >   - fixed compilation error with symbol export in API
> >   - exported power_get_lcore_mapped_cpu_id as internal API to be
> >     used in drivers/power/*
> >
> > v5:
> >   - fixed code style warning
> >
> > v4:
> >   - fixed build error with RTE_ASSERT
> >
> > v3:
> >   - renamed rte_power_core_ops.h as rte_power_cpufreq_api.h
> >   - re-worked on auto detection logic
> >
> > v2:
> >   - added NULL check for global_core_ops in rte_power_get_core_ops
> >
> > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > ---
> >   drivers/meson.build                           |   1 +
> >   .../power/acpi/acpi_cpufreq.c                 |  22 +-
> >   .../power/acpi/acpi_cpufreq.h                 |   6 +-
> >   drivers/power/acpi/meson.build                |  10 +
> >   .../power/amd_pstate/amd_pstate_cpufreq.c     |  24 +-
> >   .../power/amd_pstate/amd_pstate_cpufreq.h     |  10 +-
> >   drivers/power/amd_pstate/meson.build          |  10 +
> >   .../power/cppc/cppc_cpufreq.c                 |  22 +-
> >   .../power/cppc/cppc_cpufreq.h                 |   8 +-
> >   drivers/power/cppc/meson.build                |  10 +
> >   .../power/kvm_vm}/guest_channel.c             |   0
> >   .../power/kvm_vm}/guest_channel.h             |   0
> >   .../power/kvm_vm/kvm_vm.c                     |  22 +-
> >   .../power/kvm_vm/kvm_vm.h                     |   6 +-
> >   drivers/power/kvm_vm/meson.build              |  14 +
> >   drivers/power/meson.build                     |  12 +
> >   drivers/power/pstate/meson.build              |  10 +
> >   .../power/pstate/pstate_cpufreq.c             |  22 +-
> >   .../power/pstate/pstate_cpufreq.h             |   6 +-
> >   lib/power/meson.build                         |   7 +-
> >   lib/power/power_common.c                      |   2 +-
> >   lib/power/power_common.h                      |  18 +-
> >   lib/power/rte_power.c                         | 355 ++++++++----------
> >   lib/power/rte_power.h                         | 116 +++---
> >   lib/power/rte_power_cpufreq_api.h             | 206 ++++++++++
> >   lib/power/version.map                         |  15 +
> >   26 files changed, 665 insertions(+), 269 deletions(-)
> >   rename lib/power/power_acpi_cpufreq.c => drivers/power/acpi/acpi_cpufreq.c
> (95%)
> >   rename lib/power/power_acpi_cpufreq.h => drivers/power/acpi/acpi_cpufreq.h
> (98%)
> >   create mode 100644 drivers/power/acpi/meson.build
> >   rename lib/power/power_amd_pstate_cpufreq.c =>
> drivers/power/amd_pstate/amd_pstate_cpufreq.c (95%)
> >   rename lib/power/power_amd_pstate_cpufreq.h =>
> drivers/power/amd_pstate/amd_pstate_cpufreq.h (96%)
> >   create mode 100644 drivers/power/amd_pstate/meson.build
> >   rename lib/power/power_cppc_cpufreq.c => drivers/power/cppc/cppc_cpufreq.c
> (95%)
> >   rename lib/power/power_cppc_cpufreq.h => drivers/power/cppc/cppc_cpufreq.h
> (97%)
> >   create mode 100644 drivers/power/cppc/meson.build
> >   rename {lib/power => drivers/power/kvm_vm}/guest_channel.c (100%)
> >   rename {lib/power => drivers/power/kvm_vm}/guest_channel.h (100%)
> >   rename lib/power/power_kvm_vm.c => drivers/power/kvm_vm/kvm_vm.c (82%)
> >   rename lib/power/power_kvm_vm.h => drivers/power/kvm_vm/kvm_vm.h (98%)
> >   create mode 100644 drivers/power/kvm_vm/meson.build
> >   create mode 100644 drivers/power/meson.build
> >   create mode 100644 drivers/power/pstate/meson.build
> >   rename lib/power/power_pstate_cpufreq.c =>
> drivers/power/pstate/pstate_cpufreq.c (96%)
> >   rename lib/power/power_pstate_cpufreq.h =>
> drivers/power/pstate/pstate_cpufreq.h (98%)
> >   create mode 100644 lib/power/rte_power_cpufreq_api.h
> >
> > diff --git a/drivers/meson.build b/drivers/meson.build index
> > 2733306698..7ef4f581a0 100644
> > --- a/drivers/meson.build
> > +++ b/drivers/meson.build
> > @@ -29,6 +29,7 @@ subdirs = [
> >           'event',          # depends on common, bus, mempool and net.
> >           'baseband',       # depends on common and bus.
> >           'gpu',            # depends on common and bus.
> > +        'power',          # depends on common (in future).
> >   ]
> >
> >   if meson.is_cross_build()
> > diff --git a/lib/power/power_acpi_cpufreq.c
> > b/drivers/power/acpi/acpi_cpufreq.c
> > similarity index 95%
> > rename from lib/power/power_acpi_cpufreq.c rename to
> > drivers/power/acpi/acpi_cpufreq.c index ae809fbb60..974fbb7ba8 100644
> > --- a/lib/power/power_acpi_cpufreq.c
> > +++ b/drivers/power/acpi/acpi_cpufreq.c
> > @@ -10,7 +10,7 @@
> >   #include <rte_stdatomic.h>
> >   #include <rte_string_fns.h>
> >
> > -#include "power_acpi_cpufreq.h"
> > +#include "acpi_cpufreq.h"
> >   #include "power_common.h"
> >
> <...>
> > diff --git a/lib/power/power_common.c b/lib/power/power_common.c index
> > b47c63a5f1..e482f71c64 100644
> > --- a/lib/power/power_common.c
> > +++ b/lib/power/power_common.c
> > @@ -13,7 +13,7 @@
> >
> >   #include "power_common.h"
> >
> > -RTE_LOG_REGISTER_DEFAULT(power_logtype, INFO);
> > +RTE_LOG_REGISTER_DEFAULT(rte_power_logtype, INFO);
> >
> >   #define POWER_SYSFILE_SCALING_DRIVER   \
> >               "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_driver"
> > diff --git a/lib/power/power_common.h b/lib/power/power_common.h index
> > 82fb94d0c0..c294f561bb 100644
> > --- a/lib/power/power_common.h
> > +++ b/lib/power/power_common.h
> > @@ -6,12 +6,13 @@
> >   #define _POWER_COMMON_H_
> >
> >   #include <rte_common.h>
> > +#include <rte_compat.h>
> >   #include <rte_log.h>
> >
> >   #define RTE_POWER_INVALID_FREQ_INDEX (~0)
> >
> > -extern int power_logtype;
> > -#define RTE_LOGTYPE_POWER power_logtype
> > +extern int rte_power_logtype;
> > +#define RTE_LOGTYPE_POWER rte_power_logtype
> >   #define POWER_LOG(level, ...) \
> >       RTE_LOG_LINE(level, POWER, "" __VA_ARGS__)
> >
> > @@ -23,14 +24,27 @@ extern int power_logtype;
> >   #endif
> >
> >   /* check if scaling driver matches one we want */
> > +__rte_internal
> >   int cpufreq_check_scaling_driver(const char *driver);
> > +
> > +__rte_internal
> >   int power_set_governor(unsigned int lcore_id, const char *new_governor,
> >               char *orig_governor, size_t orig_governor_len);
> cpufreq_check_scaling_driver and power_set_governor are only used for cpufreq,
> so they shouldn't be put in this common header file.
> We came to an agreement on this in patch V2 1/4.
> I guess you forgot it😁
> I suggest moving these two APIs to rte_power_cpufreq_api.h.
OK!
> > +
> > +__rte_internal
> >   int open_core_sysfs_file(FILE **f, const char *mode, const char *format, ...)
> >               __rte_format_printf(3, 4);
> > +
> > +__rte_internal
> >   int read_core_sysfs_u32(FILE *f, uint32_t *val);
> > +
> > +__rte_internal
> >   int read_core_sysfs_s(FILE *f, char *buf, unsigned int len);
> > +
> > +__rte_internal
> >   int write_core_sysfs_s(FILE *f, const char *str);
> > +
> > +__rte_internal
> >   int power_get_lcore_mapped_cpu_id(uint32_t lcore_id, uint32_t
> > *cpu_id);
> >
> >   #endif /* _POWER_COMMON_H_ */
> > diff --git a/lib/power/rte_power.c b/lib/power/rte_power.c index
> > 36c3f3da98..416f0148a3 100644
> > --- a/lib/power/rte_power.c
> > +++ b/lib/power/rte_power.c
> > @@ -6,155 +6,88 @@
> >
> >   #include <rte_errno.h>
> >   #include <rte_spinlock.h>
> > +#include <rte_debug.h>
> >
> >   #include "rte_power.h"
> > -#include "power_acpi_cpufreq.h"
> > -#include "power_cppc_cpufreq.h"
> >   #include "power_common.h"
> > -#include "power_kvm_vm.h"
> > -#include "power_pstate_cpufreq.h"
> > -#include "power_amd_pstate_cpufreq.h"
> >
> > -enum power_management_env global_default_env = PM_ENV_NOT_SET;
> > +static enum power_management_env global_default_env =
> PM_ENV_NOT_SET;
> > +static struct rte_power_core_ops *global_power_core_ops;
> >
> >   static rte_spinlock_t global_env_cfg_lock =
> > RTE_SPINLOCK_INITIALIZER;
> > -
> > -/* function pointers */
> > -rte_power_freqs_t rte_power_freqs  = NULL; -rte_power_get_freq_t
> > rte_power_get_freq = NULL; -rte_power_set_freq_t rte_power_set_freq =
> > NULL; -rte_power_freq_change_t rte_power_freq_up = NULL;
> > -rte_power_freq_change_t rte_power_freq_down = NULL;
> > -rte_power_freq_change_t rte_power_freq_max = NULL;
> > -rte_power_freq_change_t rte_power_freq_min = NULL;
> > -rte_power_freq_change_t rte_power_turbo_status;
> > -rte_power_freq_change_t rte_power_freq_enable_turbo;
> > -rte_power_freq_change_t rte_power_freq_disable_turbo;
> > -rte_power_get_capabilities_t rte_power_get_capabilities;
> > -
> > -static void
> > -reset_power_function_ptrs(void)
> > +static RTE_TAILQ_HEAD(, rte_power_core_ops) core_ops_list =
> > +                     TAILQ_HEAD_INITIALIZER(core_ops_list);
> > +
> > +const char *power_env_str[] = {
> > +     "not set",
> > +     "acpi",
> > +     "kvm-vm",
> > +     "pstate",
> > +     "cppc",
> > +     "amd-pstate"
> > +};
> > +
>
> <...>
> > +uint32_t
> > +rte_power_freqs(unsigned int lcore_id, uint32_t *freqs, uint32_t n) {
> > +     RTE_ASSERT(global_power_core_ops != NULL);
> > +     return global_power_core_ops->get_avail_freqs(lcore_id, freqs,
> > +n); }
> > +
> > +uint32_t
> > +rte_power_get_freq(unsigned int lcore_id) {
> > +     RTE_ASSERT(global_power_core_ops != NULL);
> > +     return global_power_core_ops->get_freq(lcore_id);
> > +}
> > +
> > +uint32_t
> > +rte_power_set_freq(unsigned int lcore_id, uint32_t index) {
> > +     RTE_ASSERT(global_power_core_ops != NULL);
> > +     return global_power_core_ops->set_freq(lcore_id, index); }
> > +
> > +int
> > +rte_power_freq_up(unsigned int lcore_id) {
> > +     RTE_ASSERT(global_power_core_ops != NULL);
> > +     return global_power_core_ops->freq_up(lcore_id);
> > +}
> > +
> > +int
> > +rte_power_freq_down(unsigned int lcore_id) {
> > +     RTE_ASSERT(global_power_core_ops != NULL);
> > +     return global_power_core_ops->freq_down(lcore_id);
> > +}
> > +
> > +int
> > +rte_power_freq_max(unsigned int lcore_id) {
> > +     RTE_ASSERT(global_power_core_ops != NULL);
> > +     return global_power_core_ops->freq_max(lcore_id);
> > +}
> > +
> > +int
> > +rte_power_freq_min(unsigned int lcore_id) {
> > +     RTE_ASSERT(global_power_core_ops != NULL);
> > +     return global_power_core_ops->freq_min(lcore_id);
> > +}
> >
> > +int
> > +rte_power_turbo_status(unsigned int lcore_id) {
> > +     RTE_ASSERT(global_power_core_ops != NULL);
> > +     return global_power_core_ops->turbo_status(lcore_id);
> > +}
> > +
> > +int
> > +rte_power_freq_enable_turbo(unsigned int lcore_id) {
> > +     RTE_ASSERT(global_power_core_ops != NULL);
> > +     return global_power_core_ops->enable_turbo(lcore_id);
> > +}
> > +
> > +int
> > +rte_power_freq_disable_turbo(unsigned int lcore_id) {
> > +     RTE_ASSERT(global_power_core_ops != NULL);
> > +     return global_power_core_ops->disable_turbo(lcore_id);
> > +}
> > +
> > +int
> > +rte_power_get_capabilities(unsigned int lcore_id,
> > +             struct rte_power_core_capabilities *caps) {
> > +     RTE_ASSERT(global_power_core_ops != NULL);
> > +     return global_power_core_ops->get_caps(lcore_id, caps);
> >   }
> > diff --git a/lib/power/rte_power.h b/lib/power/rte_power.h index
> > 4fa4afe399..e9a72b92ad 100644
> > --- a/lib/power/rte_power.h
> > +++ b/lib/power/rte_power.h
> > @@ -1,5 +1,6 @@
> >   /* SPDX-License-Identifier: BSD-3-Clause
> >    * Copyright(c) 2010-2014 Intel Corporation
> > + * Copyright(c) 2024 Advanced Micro Devices, Inc.
> >    */
> >
> >   #ifndef _RTE_POWER_H
> > @@ -14,14 +15,21 @@
> >   #include <rte_log.h>
> >   #include <rte_power_guest_channel.h>
> >
> > +#include "rte_power_cpufreq_api.h"
> From the names of rte_power.c and rte_power.h, they are supposed to work for all
> power libraries, as I also proposed in a previous version.
> But rte_power.* currently only works for the cpufreq lib. If we need something
> that puts all power components together, we can create it then.
> Now that rte_power_cpufreq_api.h has been created for the cpufreq library,
> how about directly renaming rte_power.c to rte_power_cpufreq_api.c and rte_power.h
> to rte_power_cpufreq_api.h?
> There will be ABI changes, but that is allowed in this 24.11 release. If we plan
> to do it later, we'll have to wait another year.
Yes, I split rte_power.h as part of the refactor to avoid exposing internal functions.
Renaming rte_power.* to rte_power_cpufreq.* can be considered, but not merging it with rte_power_cpufreq_api.h.
> > +
> >   #ifdef __cplusplus
> >   extern "C" {
> >   #endif
> >
> >   /* Power Management Environment State */ -enum power_management_env
> > {PM_ENV_NOT_SET, PM_ENV_ACPI_CPUFREQ, PM_ENV_KVM_VM,
> > -             PM_ENV_PSTATE_CPUFREQ, PM_ENV_CPPC_CPUFREQ,
> > -             PM_ENV_AMD_PSTATE_CPUFREQ};
> > +enum power_management_env {
> > +     PM_ENV_NOT_SET = 0,
> > +     PM_ENV_ACPI_CPUFREQ,
> > +     PM_ENV_KVM_VM,
> > +     PM_ENV_PSTATE_CPUFREQ,
> > +     PM_ENV_CPPC_CPUFREQ,
> > +     PM_ENV_AMD_PSTATE_CPUFREQ
> > +};
> >
> <...>

^ permalink raw reply	[relevance 0%]

* Re: [PATCH v7 1/5] power: refactor core power management library
  2024-10-22  7:13  0%       ` Tummala, Sivaprasad
@ 2024-10-22  8:36  0%         ` lihuisong (C)
  0 siblings, 0 replies; 169+ results
From: lihuisong (C) @ 2024-10-22  8:36 UTC (permalink / raw)
  To: Tummala, Sivaprasad, david.hunt, konstantin.ananyev
  Cc: dev, anatoly.burakov, jerinj, radu.nicolau, gakhil,
	cristian.dumitrescu, Yigit, Ferruh


On 2024/10/22 15:13, Tummala, Sivaprasad wrote:
> Hi Huisong,
>
> Please find my comments inline.
>
>> -----Original Message-----
>> From: lihuisong (C) <lihuisong@huawei.com>
>> Sent: Tuesday, October 22, 2024 8:33 AM
>> To: Tummala, Sivaprasad <Sivaprasad.Tummala@amd.com>;
>> david.hunt@intel.com; konstantin.ananyev@huawei.com
>> Cc: dev@dpdk.org; anatoly.burakov@intel.com; jerinj@marvell.com;
>> radu.nicolau@intel.com; gakhil@marvell.com; cristian.dumitrescu@intel.com; Yigit,
>> Ferruh <Ferruh.Yigit@amd.com>
>> Subject: Re: [PATCH v7 1/5] power: refactor core power management library
>>
>> Hi Sivaprasad,
>>
>> Some comments inline.
>>
>> On 2024/10/21 12:07, Sivaprasad Tummala wrote:
>>> This patch introduces a comprehensive refactor to the core power
>>> management library. The primary focus is on improving modularity and
>>> organization by relocating specific driver implementations from the
>>> 'lib/power' directory to dedicated directories within
>>> 'drivers/power/core/*'. The adjustment of meson.build files enables
>>> the selective activation of individual drivers.
>>>
>>> These changes contribute to a significant enhancement in code
>>> organization, providing a clearer structure for driver implementations.
>>> The refactor aims to improve overall code clarity and boost
>>> maintainability. Additionally, it establishes a foundation for future
>>> development, allowing for more focused work on individual drivers and
>>> seamless integration of forthcoming enhancements.
>>>
>>> v6:
>>>    - fixed compilation error with symbol export in API
>>>    - exported power_get_lcore_mapped_cpu_id as internal API to be
>>>      used in drivers/power/*
>>>
>>> v5:
>>>    - fixed code style warning
>>>
>>> v4:
>>>    - fixed build error with RTE_ASSERT
>>>
>>> v3:
>>>    - renamed rte_power_core_ops.h as rte_power_cpufreq_api.h
>>>    - re-worked on auto detection logic
>>>
>>> v2:
>>>    - added NULL check for global_core_ops in rte_power_get_core_ops
>>>
>>> Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
>>> ---
>>>    drivers/meson.build                           |   1 +
>>>    .../power/acpi/acpi_cpufreq.c                 |  22 +-
>>>    .../power/acpi/acpi_cpufreq.h                 |   6 +-
>>>    drivers/power/acpi/meson.build                |  10 +
>>>    .../power/amd_pstate/amd_pstate_cpufreq.c     |  24 +-
>>>    .../power/amd_pstate/amd_pstate_cpufreq.h     |  10 +-
>>>    drivers/power/amd_pstate/meson.build          |  10 +
>>>    .../power/cppc/cppc_cpufreq.c                 |  22 +-
>>>    .../power/cppc/cppc_cpufreq.h                 |   8 +-
>>>    drivers/power/cppc/meson.build                |  10 +
>>>    .../power/kvm_vm}/guest_channel.c             |   0
>>>    .../power/kvm_vm}/guest_channel.h             |   0
>>>    .../power/kvm_vm/kvm_vm.c                     |  22 +-
>>>    .../power/kvm_vm/kvm_vm.h                     |   6 +-
>>>    drivers/power/kvm_vm/meson.build              |  14 +
>>>    drivers/power/meson.build                     |  12 +
>>>    drivers/power/pstate/meson.build              |  10 +
>>>    .../power/pstate/pstate_cpufreq.c             |  22 +-
>>>    .../power/pstate/pstate_cpufreq.h             |   6 +-
>>>    lib/power/meson.build                         |   7 +-
>>>    lib/power/power_common.c                      |   2 +-
>>>    lib/power/power_common.h                      |  18 +-
>>>    lib/power/rte_power.c                         | 355 ++++++++----------
>>>    lib/power/rte_power.h                         | 116 +++---
>>>    lib/power/rte_power_cpufreq_api.h             | 206 ++++++++++
>>>    lib/power/version.map                         |  15 +
>>>    26 files changed, 665 insertions(+), 269 deletions(-)
>>>    rename lib/power/power_acpi_cpufreq.c => drivers/power/acpi/acpi_cpufreq.c
>> (95%)
>>>    rename lib/power/power_acpi_cpufreq.h => drivers/power/acpi/acpi_cpufreq.h
>> (98%)
>>>    create mode 100644 drivers/power/acpi/meson.build
>>>    rename lib/power/power_amd_pstate_cpufreq.c =>
>> drivers/power/amd_pstate/amd_pstate_cpufreq.c (95%)
>>>    rename lib/power/power_amd_pstate_cpufreq.h =>
>> drivers/power/amd_pstate/amd_pstate_cpufreq.h (96%)
>>>    create mode 100644 drivers/power/amd_pstate/meson.build
>>>    rename lib/power/power_cppc_cpufreq.c => drivers/power/cppc/cppc_cpufreq.c
>> (95%)
>>>    rename lib/power/power_cppc_cpufreq.h => drivers/power/cppc/cppc_cpufreq.h
>> (97%)
>>>    create mode 100644 drivers/power/cppc/meson.build
>>>    rename {lib/power => drivers/power/kvm_vm}/guest_channel.c (100%)
>>>    rename {lib/power => drivers/power/kvm_vm}/guest_channel.h (100%)
>>>    rename lib/power/power_kvm_vm.c => drivers/power/kvm_vm/kvm_vm.c (82%)
>>>    rename lib/power/power_kvm_vm.h => drivers/power/kvm_vm/kvm_vm.h (98%)
>>>    create mode 100644 drivers/power/kvm_vm/meson.build
>>>    create mode 100644 drivers/power/meson.build
>>>    create mode 100644 drivers/power/pstate/meson.build
>>>    rename lib/power/power_pstate_cpufreq.c =>
>> drivers/power/pstate/pstate_cpufreq.c (96%)
>>>    rename lib/power/power_pstate_cpufreq.h =>
>> drivers/power/pstate/pstate_cpufreq.h (98%)
>>>    create mode 100644 lib/power/rte_power_cpufreq_api.h
>>>
>>> diff --git a/drivers/meson.build b/drivers/meson.build index
>>> 2733306698..7ef4f581a0 100644
>>> --- a/drivers/meson.build
>>> +++ b/drivers/meson.build
>>> @@ -29,6 +29,7 @@ subdirs = [
>>>            'event',          # depends on common, bus, mempool and net.
>>>            'baseband',       # depends on common and bus.
>>>            'gpu',            # depends on common and bus.
>>> +        'power',          # depends on common (in future).
>>>    ]
>>>
>>>    if meson.is_cross_build()
>>> diff --git a/lib/power/power_acpi_cpufreq.c
>>> b/drivers/power/acpi/acpi_cpufreq.c
>>> similarity index 95%
>>> rename from lib/power/power_acpi_cpufreq.c rename to
>>> drivers/power/acpi/acpi_cpufreq.c index ae809fbb60..974fbb7ba8 100644
>>> --- a/lib/power/power_acpi_cpufreq.c
>>> +++ b/drivers/power/acpi/acpi_cpufreq.c
>>> @@ -10,7 +10,7 @@
>>>    #include <rte_stdatomic.h>
>>>    #include <rte_string_fns.h>
>>>
>>> -#include "power_acpi_cpufreq.h"
>>> +#include "acpi_cpufreq.h"
>>>    #include "power_common.h"
>>>
>> <...>
>>> diff --git a/lib/power/power_common.c b/lib/power/power_common.c index
>>> b47c63a5f1..e482f71c64 100644
>>> --- a/lib/power/power_common.c
>>> +++ b/lib/power/power_common.c
>>> @@ -13,7 +13,7 @@
>>>
>>>    #include "power_common.h"
>>>
>>> -RTE_LOG_REGISTER_DEFAULT(power_logtype, INFO);
>>> +RTE_LOG_REGISTER_DEFAULT(rte_power_logtype, INFO);
>>>
>>>    #define POWER_SYSFILE_SCALING_DRIVER   \
>>>                "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_driver"
>>> diff --git a/lib/power/power_common.h b/lib/power/power_common.h index
>>> 82fb94d0c0..c294f561bb 100644
>>> --- a/lib/power/power_common.h
>>> +++ b/lib/power/power_common.h
>>> @@ -6,12 +6,13 @@
>>>    #define _POWER_COMMON_H_
>>>
>>>    #include <rte_common.h>
>>> +#include <rte_compat.h>
>>>    #include <rte_log.h>
>>>
>>>    #define RTE_POWER_INVALID_FREQ_INDEX (~0)
>>>
>>> -extern int power_logtype;
>>> -#define RTE_LOGTYPE_POWER power_logtype
>>> +extern int rte_power_logtype;
>>> +#define RTE_LOGTYPE_POWER rte_power_logtype
>>>    #define POWER_LOG(level, ...) \
>>>        RTE_LOG_LINE(level, POWER, "" __VA_ARGS__)
>>>
>>> @@ -23,14 +24,27 @@ extern int power_logtype;
>>>    #endif
>>>
>>>    /* check if scaling driver matches one we want */
>>> +__rte_internal
>>>    int cpufreq_check_scaling_driver(const char *driver);
>>> +
>>> +__rte_internal
>>>    int power_set_governor(unsigned int lcore_id, const char *new_governor,
>>>                char *orig_governor, size_t orig_governor_len);
>> cpufreq_check_scaling_driver and power_set_governor are only used for cpufreq,
>> so they shouldn't be put in this common header file.
>> We came to an agreement on this in patch V2 1/4.
>> I guess you forgot it😁
>> I suggest moving these two APIs to rte_power_cpufreq_api.h.
> OK!
>>> +
>>> +__rte_internal
>>>    int open_core_sysfs_file(FILE **f, const char *mode, const char *format, ...)
>>>                __rte_format_printf(3, 4);
>>> +
>>> +__rte_internal
>>>    int read_core_sysfs_u32(FILE *f, uint32_t *val);
>>> +
>>> +__rte_internal
>>>    int read_core_sysfs_s(FILE *f, char *buf, unsigned int len);
>>> +
>>> +__rte_internal
>>>    int write_core_sysfs_s(FILE *f, const char *str);
>>> +
>>> +__rte_internal
>>>    int power_get_lcore_mapped_cpu_id(uint32_t lcore_id, uint32_t
>>> *cpu_id);
>>>
>>>    #endif /* _POWER_COMMON_H_ */
>>> diff --git a/lib/power/rte_power.c b/lib/power/rte_power.c index
>>> 36c3f3da98..416f0148a3 100644
>>> --- a/lib/power/rte_power.c
>>> +++ b/lib/power/rte_power.c
>>> @@ -6,155 +6,88 @@
>>>
>>>    #include <rte_errno.h>
>>>    #include <rte_spinlock.h>
>>> +#include <rte_debug.h>
>>>
>>>    #include "rte_power.h"
>>> -#include "power_acpi_cpufreq.h"
>>> -#include "power_cppc_cpufreq.h"
>>>    #include "power_common.h"
>>> -#include "power_kvm_vm.h"
>>> -#include "power_pstate_cpufreq.h"
>>> -#include "power_amd_pstate_cpufreq.h"
>>>
>>> -enum power_management_env global_default_env = PM_ENV_NOT_SET;
>>> +static enum power_management_env global_default_env =
>> PM_ENV_NOT_SET;
>>> +static struct rte_power_core_ops *global_power_core_ops;
>>>
>>>    static rte_spinlock_t global_env_cfg_lock =
>>> RTE_SPINLOCK_INITIALIZER;
>>> -
>>> -/* function pointers */
>>> -rte_power_freqs_t rte_power_freqs  = NULL; -rte_power_get_freq_t
>>> rte_power_get_freq = NULL; -rte_power_set_freq_t rte_power_set_freq =
>>> NULL; -rte_power_freq_change_t rte_power_freq_up = NULL;
>>> -rte_power_freq_change_t rte_power_freq_down = NULL;
>>> -rte_power_freq_change_t rte_power_freq_max = NULL;
>>> -rte_power_freq_change_t rte_power_freq_min = NULL;
>>> -rte_power_freq_change_t rte_power_turbo_status;
>>> -rte_power_freq_change_t rte_power_freq_enable_turbo;
>>> -rte_power_freq_change_t rte_power_freq_disable_turbo;
>>> -rte_power_get_capabilities_t rte_power_get_capabilities;
>>> -
>>> -static void
>>> -reset_power_function_ptrs(void)
>>> +static RTE_TAILQ_HEAD(, rte_power_core_ops) core_ops_list =
>>> +                     TAILQ_HEAD_INITIALIZER(core_ops_list);
>>> +
>>> +const char *power_env_str[] = {
>>> +     "not set",
>>> +     "acpi",
>>> +     "kvm-vm",
>>> +     "pstate",
>>> +     "cppc",
>>> +     "amd-pstate"
>>> +};
>>> +
>> <...>
>>> +uint32_t
>>> +rte_power_freqs(unsigned int lcore_id, uint32_t *freqs, uint32_t n) {
>>> +     RTE_ASSERT(global_power_core_ops != NULL);
>>> +     return global_power_core_ops->get_avail_freqs(lcore_id, freqs,
>>> +n); }
>>> +
>>> +uint32_t
>>> +rte_power_get_freq(unsigned int lcore_id) {
>>> +     RTE_ASSERT(global_power_core_ops != NULL);
>>> +     return global_power_core_ops->get_freq(lcore_id);
>>> +}
>>> +
>>> +uint32_t
>>> +rte_power_set_freq(unsigned int lcore_id, uint32_t index) {
>>> +     RTE_ASSERT(global_power_core_ops != NULL);
>>> +     return global_power_core_ops->set_freq(lcore_id, index); }
>>> +
>>> +int
>>> +rte_power_freq_up(unsigned int lcore_id) {
>>> +     RTE_ASSERT(global_power_core_ops != NULL);
>>> +     return global_power_core_ops->freq_up(lcore_id);
>>> +}
>>> +
>>> +int
>>> +rte_power_freq_down(unsigned int lcore_id) {
>>> +     RTE_ASSERT(global_power_core_ops != NULL);
>>> +     return global_power_core_ops->freq_down(lcore_id);
>>> +}
>>> +
>>> +int
>>> +rte_power_freq_max(unsigned int lcore_id) {
>>> +     RTE_ASSERT(global_power_core_ops != NULL);
>>> +     return global_power_core_ops->freq_max(lcore_id);
>>> +}
>>> +
>>> +int
>>> +rte_power_freq_min(unsigned int lcore_id) {
>>> +     RTE_ASSERT(global_power_core_ops != NULL);
>>> +     return global_power_core_ops->freq_min(lcore_id);
>>> +}
>>>
>>> +int
>>> +rte_power_turbo_status(unsigned int lcore_id) {
>>> +     RTE_ASSERT(global_power_core_ops != NULL);
>>> +     return global_power_core_ops->turbo_status(lcore_id);
>>> +}
>>> +
>>> +int
>>> +rte_power_freq_enable_turbo(unsigned int lcore_id) {
>>> +     RTE_ASSERT(global_power_core_ops != NULL);
>>> +     return global_power_core_ops->enable_turbo(lcore_id);
>>> +}
>>> +
>>> +int
>>> +rte_power_freq_disable_turbo(unsigned int lcore_id) {
>>> +     RTE_ASSERT(global_power_core_ops != NULL);
>>> +     return global_power_core_ops->disable_turbo(lcore_id);
>>> +}
>>> +
>>> +int
>>> +rte_power_get_capabilities(unsigned int lcore_id,
>>> +             struct rte_power_core_capabilities *caps) {
>>> +     RTE_ASSERT(global_power_core_ops != NULL);
>>> +     return global_power_core_ops->get_caps(lcore_id, caps);
>>>    }
>>> diff --git a/lib/power/rte_power.h b/lib/power/rte_power.h index
>>> 4fa4afe399..e9a72b92ad 100644
>>> --- a/lib/power/rte_power.h
>>> +++ b/lib/power/rte_power.h
>>> @@ -1,5 +1,6 @@
>>>    /* SPDX-License-Identifier: BSD-3-Clause
>>>     * Copyright(c) 2010-2014 Intel Corporation
>>> + * Copyright(c) 2024 Advanced Micro Devices, Inc.
>>>     */
>>>
>>>    #ifndef _RTE_POWER_H
>>> @@ -14,14 +15,21 @@
>>>    #include <rte_log.h>
>>>    #include <rte_power_guest_channel.h>
>>>
>>> +#include "rte_power_cpufreq_api.h"
>> From the names of rte_power.c and rte_power.h, they are supposed to work for all
>> power libraries, as I also proposed in a previous version.
>> But rte_power.* currently only works for the cpufreq lib. If we need something
>> that puts all power components together, we can create it then.
>> Now that rte_power_cpufreq_api.h has been created for the cpufreq library,
>> how about directly renaming rte_power.c to rte_power_cpufreq_api.c and rte_power.h
>> to rte_power_cpufreq_api.h?
>> There will be ABI changes, but that is allowed in this 24.11 release. If we plan
>> to do it later, we'll have to wait another year.
> Yes, I split rte_power.h as part of the refactor to avoid exposing internal functions.
> Renaming rte_power.* to rte_power_cpufreq.* can be considered, but not merging it with rte_power_cpufreq_api.h.
What is your plan? I feel it is not very hard; it is just renaming the file.
>>> +
>>>    #ifdef __cplusplus
>>>    extern "C" {
>>>    #endif
>>>
>>>    /* Power Management Environment State */ -enum power_management_env
>>> {PM_ENV_NOT_SET, PM_ENV_ACPI_CPUFREQ, PM_ENV_KVM_VM,
>>> -             PM_ENV_PSTATE_CPUFREQ, PM_ENV_CPPC_CPUFREQ,
>>> -             PM_ENV_AMD_PSTATE_CPUFREQ};
>>> +enum power_management_env {
>>> +     PM_ENV_NOT_SET = 0,
>>> +     PM_ENV_ACPI_CPUFREQ,
>>> +     PM_ENV_KVM_VM,
>>> +     PM_ENV_PSTATE_CPUFREQ,
>>> +     PM_ENV_CPPC_CPUFREQ,
>>> +     PM_ENV_AMD_PSTATE_CPUFREQ
>>> +};
>>>
>> <...>

^ permalink raw reply	[relevance 0%]

* RE: [PATCH v11 1/2] power: introduce PM QoS API on CPU wide
  2024-10-21 11:42  5%   ` [PATCH v11 1/2] power: introduce PM QoS API on CPU wide Huisong Li
@ 2024-10-22  9:08  0%     ` Konstantin Ananyev
  2024-10-22  9:41  0%       ` lihuisong (C)
  0 siblings, 1 reply; 169+ results
From: Konstantin Ananyev @ 2024-10-22  9:08 UTC (permalink / raw)
  To: lihuisong (C), dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, david.marchand, Fengchengwen,
	liuyonglong



> The deeper the idle state, the lower the power consumption, but the longer
> the resume time. Some services are delay sensitive and expect a low
> resume time, such as interrupt packet receiving mode.
> 
> The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
> interface is used to set and get the resume latency limit on cpuX for
> userspace. Each cpuidle governor in Linux selects which idle state to enter
> based on this CPU resume latency in its idle task.
> 
> The per-CPU PM QoS API can be used to control this CPU's idle state
> selection and limit it to the shallowest idle state, lowering the wake-up
> delay, by setting a strict resume latency (zero value).
> 
> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>

LGTM overall, few nits, see below.
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>

> ---
>  doc/guides/prog_guide/power_man.rst    |  19 ++++
>  doc/guides/rel_notes/release_24_11.rst |   5 +
>  lib/power/meson.build                  |   2 +
>  lib/power/rte_power_qos.c              | 123 +++++++++++++++++++++++++
>  lib/power/rte_power_qos.h              |  73 +++++++++++++++
>  lib/power/version.map                  |   4 +
>  6 files changed, 226 insertions(+)
>  create mode 100644 lib/power/rte_power_qos.c
>  create mode 100644 lib/power/rte_power_qos.h
> 
> diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
> index f6674efe2d..91358b04f3 100644
> --- a/doc/guides/prog_guide/power_man.rst
> +++ b/doc/guides/prog_guide/power_man.rst
> @@ -107,6 +107,25 @@ User Cases
>  The power management mechanism is used to save power when performing L3 forwarding.
> 
> 
> +PM QoS
> +------
> +
> +The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
> +interface is used to set and get the resume latency limit on the cpuX for
> +userspace. Each cpuidle governor in Linux select which idle state to enter
> +based on this CPU resume latency in their idle task.
> +
> +The deeper the idle state, the lower the power consumption, but the longer
> +the resume time. Some service are latency sensitive and very except the low
> +resume time, like interrupt packet receiving mode.
> +
> +Applications can set and get the CPU resume latency by the
> +``rte_power_qos_set_cpu_resume_latency()`` and ``rte_power_qos_get_cpu_resume_latency()``
> +respectively. Applications can set a strict resume latency (zero value) by
> +the ``rte_power_qos_set_cpu_resume_latency()`` to low the resume latency and
> +get better performance (instead, the power consumption of platform may increase).
> +
> +
>  Ethernet PMD Power Management API
>  ---------------------------------
> 
> diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
> index fa4822d928..d9e268274b 100644
> --- a/doc/guides/rel_notes/release_24_11.rst
> +++ b/doc/guides/rel_notes/release_24_11.rst
> @@ -237,6 +237,11 @@ New Features
>    This field is used to pass an extra configuration settings such as ability
>    to lookup IPv4 addresses in network byte order.
> 
> +* **Introduce per-CPU PM QoS interface.**
> +
> +  * Add per-CPU PM QoS interface to lower the resume latency when waking up
> +    from idle state.
> +
>  * **Added new API to register telemetry endpoint callbacks with private arguments.**
> 
>    A new ``rte_telemetry_register_cmd_arg`` function is available to pass an opaque value to
> diff --git a/lib/power/meson.build b/lib/power/meson.build
> index 2f0f3d26e9..9b5d3e8315 100644
> --- a/lib/power/meson.build
> +++ b/lib/power/meson.build
> @@ -23,12 +23,14 @@ sources = files(
>          'rte_power.c',
>          'rte_power_uncore.c',
>          'rte_power_pmd_mgmt.c',
> +	'rte_power_qos.c',
>  )
>  headers = files(
>          'rte_power.h',
>          'rte_power_guest_channel.h',
>          'rte_power_pmd_mgmt.h',
>          'rte_power_uncore.h',
> +	'rte_power_qos.h',
>  )
> 
>  deps += ['timer', 'ethdev']
> diff --git a/lib/power/rte_power_qos.c b/lib/power/rte_power_qos.c
> new file mode 100644
> index 0000000000..09692b2161
> --- /dev/null
> +++ b/lib/power/rte_power_qos.c
> @@ -0,0 +1,123 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2024 HiSilicon Limited
> + */
> +
> +#include <errno.h>
> +#include <stdlib.h>
> +#include <string.h>
> +
> +#include <rte_lcore.h>
> +#include <rte_log.h>
> +
> +#include "power_common.h"
> +#include "rte_power_qos.h"
> +
> +#define PM_QOS_SYSFILE_RESUME_LATENCY_US	\
> +	"/sys/devices/system/cpu/cpu%u/power/pm_qos_resume_latency_us"
> +
> +#define PM_QOS_CPU_RESUME_LATENCY_BUF_LEN	32
> +
> +int
> +rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency)
> +{
> +	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
> +	uint32_t cpu_id;
> +	FILE *f;
> +	int ret;
> +
> +	if (!rte_lcore_is_enabled(lcore_id)) {
> +		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
> +		return -EINVAL;
> +	}
> +	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
> +	if (ret != 0)
> +		return ret;
> +
> +	if (latency < 0) {
> +		POWER_LOG(ERR, "latency should be greater than or equal to 0");
> +		return -EINVAL;
> +	}
> +
> +	ret = open_core_sysfs_file(&f, "w", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
> +	if (ret != 0) {
> +		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
> +			  cpu_id, strerror(errno));
> +		return ret;
> +	}
> +
> +	/*
> +	 * Based on the sysfs interface pm_qos_resume_latency_us under
> +	 * the @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in the kernel, the meaning
> +	 * of the different input strings is as follows.
> +	 * 1> the resume latency is 0 if the input is "n/a".
> +	 * 2> the resume latency is no constraint if the input is "0".
> +	 * 3> the resume latency is the actual value to be set.
> +	 */
> +	if (latency == 0)


Why not use your own macro
RTE_POWER_QOS_STRICT_LATENCY_VALUE
instead of a hard-coded constant here?
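A sketch of the suggested form (same behavior, just using the macro that this
patch itself defines):

	if (latency == RTE_POWER_QOS_STRICT_LATENCY_VALUE)
		snprintf(buf, sizeof(buf), "%s", "n/a");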

> +		snprintf(buf, sizeof(buf), "%s", "n/a");
> +	else if (latency == RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT)
> +		snprintf(buf, sizeof(buf), "%u", 0);
> +	else
> +		snprintf(buf, sizeof(buf), "%u", latency);
> +
> +	ret = write_core_sysfs_s(f, buf);
> +	if (ret != 0)
> +		POWER_LOG(ERR, "Failed to write "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
> +			  cpu_id, strerror(errno));
> +
> +	fclose(f);
> +
> +	return ret;
> +}
> +
> +int
> +rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id)
> +{
> +	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
> +	int latency = -1;
> +	uint32_t cpu_id;
> +	FILE *f;
> +	int ret;
> +
> +	if (!rte_lcore_is_enabled(lcore_id)) {
> +		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
> +		return -EINVAL;
> +	}
> +	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
> +	if (ret != 0)
> +		return ret;
> +
> +	ret = open_core_sysfs_file(&f, "r", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
> +	if (ret != 0) {
> +		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
> +			  cpu_id, strerror(errno));
> +		return ret;
> +	}
> +
> +	ret = read_core_sysfs_s(f, buf, sizeof(buf));
> +	if (ret != 0) {
> +		POWER_LOG(ERR, "Failed to read "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
> +			  cpu_id, strerror(errno));
> +		goto out;
> +	}
> +
> +	/*
> +	 * Based on the sysfs interface pm_qos_resume_latency_us under
> +	 * the @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in the kernel, the meaning
> +	 * of the different output strings is as follows.
> +	 * 1> the resume latency is 0 if the output is "n/a".
> +	 * 2> the resume latency is no constraint if the output is "0".
> +	 * 3> the resume latency is the actual value in use for any other string.
> +	 */
> +	if (strcmp(buf, "n/a") == 0)
> +		latency = 0;


RTE_POWER_QOS_STRICT_LATENCY_VALUE
?

> +	else {
> +		latency = strtoul(buf, NULL, 10);
> +		latency = latency == 0 ? RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT : latency;
> +	}
> +
> +out:
> +	fclose(f);
> +
> +	return latency != -1 ? latency : ret;
> +}
> diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h
> new file mode 100644
> index 0000000000..990c488373
> --- /dev/null
> +++ b/lib/power/rte_power_qos.h
> @@ -0,0 +1,73 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2024 HiSilicon Limited
> + */
> +
> +#ifndef RTE_POWER_QOS_H
> +#define RTE_POWER_QOS_H
> +
> +#include <stdint.h>
> +
> +#include <rte_compat.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/**
> + * @file rte_power_qos.h
> + *
> + * PM QoS API.
> + *
> + * The CPU-wide resume latency limit has a positive impact on this CPU's idle
> + * state selection in each cpuidle governor.
> + * Please see the CPU-wide PM QoS description at the following link:
> + * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
> + *
> + * The deeper the idle state, the lower the power consumption, but the
> + * longer the resume time. Some services are delay sensitive and expect a
> + * low resume time, such as interrupt packet receiving mode.
> + *
> + * In these cases, the per-CPU PM QoS API can be used to control this CPU's idle
> + * state selection and limit it to the shallowest idle state, lowering the
> + * delay after sleep, by setting a strict resume latency (zero value).
> + */
> +
> +#define RTE_POWER_QOS_STRICT_LATENCY_VALUE             0
> +#define RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT    ((int)(UINT32_MAX >> 1))

Isn't it just INT32_MAX?

> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * @param lcore_id
> + *   target logical core id
> + *
> + * @param latency
> + *   The latency should be greater than or equal to zero, in microseconds.
> + *
> + * @return
> + *   0 on success. Otherwise negative value is returned.
> + */
> +__rte_experimental
> +int rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the current resume latency of this logical core.
> + * The default value in the kernel is @see RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT
> + * if it has not been set.
> + *
> + * @return
> + *   Negative value on failure.
> + *   >= 0 means the actual resume latency limit on this core.
> + */
> +__rte_experimental
> +int rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* RTE_POWER_QOS_H */
> diff --git a/lib/power/version.map b/lib/power/version.map
> index c9a226614e..08f178a39d 100644
> --- a/lib/power/version.map
> +++ b/lib/power/version.map
> @@ -51,4 +51,8 @@ EXPERIMENTAL {
>  	rte_power_set_uncore_env;
>  	rte_power_uncore_freqs;
>  	rte_power_unset_uncore_env;
> +
> +	# added in 24.11
> +	rte_power_qos_get_cpu_resume_latency;
> +	rte_power_qos_set_cpu_resume_latency;
>  };
> --
> 2.22.0


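For anyone following the thread, a minimal usage sketch of the new API as
declared in rte_power_qos.h above: pin a worker lcore to the shallowest idle
state before latency-sensitive work (e.g. interrupt RX mode) and restore the
previous limit afterwards; error handling is trimmed:

	#include <rte_power_qos.h>

	static int
	run_latency_sensitive(uint16_t lcore_id)
	{
		int prev, ret;

		/* Remember the current limit so it can be restored later. */
		prev = rte_power_qos_get_cpu_resume_latency(lcore_id);
		if (prev < 0)
			return prev;

		/* A strict (zero) limit keeps the CPU in the shallowest
		 * idle state, minimizing wake-up delay. */
		ret = rte_power_qos_set_cpu_resume_latency(lcore_id,
				RTE_POWER_QOS_STRICT_LATENCY_VALUE);
		if (ret != 0)
			return ret;

		/* ... latency-sensitive work runs here ... */

		/* Restore the previous resume latency limit. */
		return rte_power_qos_set_cpu_resume_latency(lcore_id, prev);
	}
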
^ permalink raw reply	[relevance 0%]

* Re: [PATCH v11 1/2] power: introduce PM QoS API on CPU wide
  2024-10-22  9:08  0%     ` Konstantin Ananyev
@ 2024-10-22  9:41  0%       ` lihuisong (C)
  0 siblings, 0 replies; 169+ results
From: lihuisong (C) @ 2024-10-22  9:41 UTC (permalink / raw)
  To: Konstantin Ananyev, dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, david.marchand, Fengchengwen,
	liuyonglong


On 2024/10/22 17:08, Konstantin Ananyev wrote:
>
>> The deeper the idle state, the lower the power consumption, but the longer
>> the resume time. Some services are delay sensitive and expect a low
>> resume time, such as interrupt packet receiving mode.
>>
>> The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
>> interface is used to set and get the resume latency limit on cpuX for
>> userspace. Each cpuidle governor in Linux selects which idle state to enter
>> based on this CPU resume latency in its idle task.
>>
>> The per-CPU PM QoS API can be used to control this CPU's idle state
>> selection and limit it to the shallowest idle state, lowering the wake-up
>> delay, by setting a strict resume latency (zero value).
>>
>> Signed-off-by: Huisong Li <lihuisong@huawei.com>
>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> LGTM overall, few nits, see below.
> Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
>
>> ---
>>   doc/guides/prog_guide/power_man.rst    |  19 ++++
>>   doc/guides/rel_notes/release_24_11.rst |   5 +
>>   lib/power/meson.build                  |   2 +
>>   lib/power/rte_power_qos.c              | 123 +++++++++++++++++++++++++
>>   lib/power/rte_power_qos.h              |  73 +++++++++++++++
>>   lib/power/version.map                  |   4 +
>>   6 files changed, 226 insertions(+)
>>   create mode 100644 lib/power/rte_power_qos.c
>>   create mode 100644 lib/power/rte_power_qos.h
>>
>> diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
>> index f6674efe2d..91358b04f3 100644
>> --- a/doc/guides/prog_guide/power_man.rst
>> +++ b/doc/guides/prog_guide/power_man.rst
>> @@ -107,6 +107,25 @@ User Cases
>>   The power management mechanism is used to save power when performing L3 forwarding.
>>
>>
>> +PM QoS
>> +------
>> +
>> +The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
>> +interface is used to set and get the resume latency limit on the cpuX for
>> +userspace. Each cpuidle governor in Linux selects which idle state to enter
>> +based on this CPU resume latency in its idle task.
>> +
>> +The deeper the idle state, the lower the power consumption, but the longer
>> +the resume time. Some services are latency sensitive and expect a low
>> +resume time, like interrupt packet receiving mode.
>> +
>> +Applications can set and get the CPU resume latency by the
>> +``rte_power_qos_set_cpu_resume_latency()`` and ``rte_power_qos_get_cpu_resume_latency()``
>> +respectively. Applications can set a strict resume latency (zero value) by
>> +the ``rte_power_qos_set_cpu_resume_latency()`` to lower the resume latency and
>> +get better performance (though the power consumption of the platform may increase).
>> +
>> +
>>   Ethernet PMD Power Management API
>>   ---------------------------------
>>
>> diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
>> index fa4822d928..d9e268274b 100644
>> --- a/doc/guides/rel_notes/release_24_11.rst
>> +++ b/doc/guides/rel_notes/release_24_11.rst
>> @@ -237,6 +237,11 @@ New Features
>>     This field is used to pass an extra configuration settings such as ability
>>     to lookup IPv4 addresses in network byte order.
>>
>> +* **Introduce per-CPU PM QoS interface.**
>> +
>> +  * Add per-CPU PM QoS interface to lower the resume latency when waking up
>> +    from idle state.
>> +
>>   * **Added new API to register telemetry endpoint callbacks with private arguments.**
>>
>>     A new ``rte_telemetry_register_cmd_arg`` function is available to pass an opaque value to
>> diff --git a/lib/power/meson.build b/lib/power/meson.build
>> index 2f0f3d26e9..9b5d3e8315 100644
>> --- a/lib/power/meson.build
>> +++ b/lib/power/meson.build
>> @@ -23,12 +23,14 @@ sources = files(
>>           'rte_power.c',
>>           'rte_power_uncore.c',
>>           'rte_power_pmd_mgmt.c',
>> +	'rte_power_qos.c',
>>   )
>>   headers = files(
>>           'rte_power.h',
>>           'rte_power_guest_channel.h',
>>           'rte_power_pmd_mgmt.h',
>>           'rte_power_uncore.h',
>> +	'rte_power_qos.h',
>>   )
>>
>>   deps += ['timer', 'ethdev']
>> diff --git a/lib/power/rte_power_qos.c b/lib/power/rte_power_qos.c
>> new file mode 100644
>> index 0000000000..09692b2161
>> --- /dev/null
>> +++ b/lib/power/rte_power_qos.c
>> @@ -0,0 +1,123 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2024 HiSilicon Limited
>> + */
>> +
>> +#include <errno.h>
>> +#include <stdlib.h>
>> +#include <string.h>
>> +
>> +#include <rte_lcore.h>
>> +#include <rte_log.h>
>> +
>> +#include "power_common.h"
>> +#include "rte_power_qos.h"
>> +
>> +#define PM_QOS_SYSFILE_RESUME_LATENCY_US	\
>> +	"/sys/devices/system/cpu/cpu%u/power/pm_qos_resume_latency_us"
>> +
>> +#define PM_QOS_CPU_RESUME_LATENCY_BUF_LEN	32
>> +
>> +int
>> +rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency)
>> +{
>> +	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
>> +	uint32_t cpu_id;
>> +	FILE *f;
>> +	int ret;
>> +
>> +	if (!rte_lcore_is_enabled(lcore_id)) {
>> +		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
>> +		return -EINVAL;
>> +	}
>> +	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
>> +	if (ret != 0)
>> +		return ret;
>> +
>> +	if (latency < 0) {
>> +		POWER_LOG(ERR, "latency should be greater than or equal to 0");
>> +		return -EINVAL;
>> +	}
>> +
>> +	ret = open_core_sysfs_file(&f, "w", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
>> +	if (ret != 0) {
>> +		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
>> +			  cpu_id, strerror(errno));
>> +		return ret;
>> +	}
>> +
>> +	/*
>> +	 * Based on the sysfs interface pm_qos_resume_latency_us under
>> +	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning
>> +	 * is as follows for different input string.
>> +	 * 1> the resume latency is 0 if the input is "n/a".
>> +	 * 2> the resume latency is no constraint if the input is "0".
>> +	 * 3> the resume latency is the actual value to be set.
>> +	 */
>> +	if (latency == 0)
>
> Why not to use your own macro:
> RTE_POWER_QOS_STRICT_LATENCY_VALUE
> Instead of hard-coded constant here?
you are right. will fix it in next version.
>
>> +		snprintf(buf, sizeof(buf), "%s", "n/a");
>> +	else if (latency == RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT)
>> +		snprintf(buf, sizeof(buf), "%u", 0);
>> +	else
>> +		snprintf(buf, sizeof(buf), "%u", latency);
>> +
>> +	ret = write_core_sysfs_s(f, buf);
>> +	if (ret != 0)
>> +		POWER_LOG(ERR, "Failed to write "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
>> +			  cpu_id, strerror(errno));
>> +
>> +	fclose(f);
>> +
>> +	return ret;
>> +}
>> +
>> +int
>> +rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id)
>> +{
>> +	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
>> +	int latency = -1;
>> +	uint32_t cpu_id;
>> +	FILE *f;
>> +	int ret;
>> +
>> +	if (!rte_lcore_is_enabled(lcore_id)) {
>> +		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
>> +		return -EINVAL;
>> +	}
>> +	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
>> +	if (ret != 0)
>> +		return ret;
>> +
>> +	ret = open_core_sysfs_file(&f, "r", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
>> +	if (ret != 0) {
>> +		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
>> +			  cpu_id, strerror(errno));
>> +		return ret;
>> +	}
>> +
>> +	ret = read_core_sysfs_s(f, buf, sizeof(buf));
>> +	if (ret != 0) {
>> +		POWER_LOG(ERR, "Failed to read "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
>> +			  cpu_id, strerror(errno));
>> +		goto out;
>> +	}
>> +
>> +	/*
>> +	 * Based on the sysfs interface pm_qos_resume_latency_us under
>> +	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning
>> +	 * is as follows for different output string.
>> +	 * 1> the resume latency is 0 if the output is "n/a".
>> +	 * 2> the resume latency is no constraint if the output is "0".
>> +	 * 3> the resume latency is the actual value in use for other strings.
>> +	 */
>> +	if (strcmp(buf, "n/a") == 0)
>> +		latency = 0;
>
> RTE_POWER_QOS_STRICT_LATENCY_VALUE
Ack
> ?
>
>> +	else {
>> +		latency = strtoul(buf, NULL, 10);
>> +		latency = latency == 0 ? RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT : latency;
>> +	}
>> +
>> +out:
>> +	fclose(f);
>> +
>> +	return latency != -1 ? latency : ret;
>> +}
>> diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h
>> new file mode 100644
>> index 0000000000..990c488373
>> --- /dev/null
>> +++ b/lib/power/rte_power_qos.h
>> @@ -0,0 +1,73 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2024 HiSilicon Limited
>> + */
>> +
>> +#ifndef RTE_POWER_QOS_H
>> +#define RTE_POWER_QOS_H
>> +
>> +#include <stdint.h>
>> +
>> +#include <rte_compat.h>
>> +
>> +#ifdef __cplusplus
>> +extern "C" {
>> +#endif
>> +
>> +/**
>> + * @file rte_power_qos.h
>> + *
>> + * PM QoS API.
>> + *
>> + * The CPU-wide resume latency limit has a positive impact on this CPU's idle
>> + * state selection in each cpuidle governor.
>> + * Please see the PM QoS on CPU wide in the following link:
>> + * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
>> + *
>> + * The deeper the idle state, the lower the power consumption, but the
>> + * longer the resume time. Some services are delay sensitive and expect a
>> + * low resume time, like interrupt packet receiving mode.
>> + *
>> + * In such cases, the per-CPU PM QoS API can be used to control this CPU's
>> + * idle state selection and restrict it to the shallowest idle state to lower
>> + * the delay after sleep, by setting a strict resume latency (zero value).
>> + */
>> +
>> +#define RTE_POWER_QOS_STRICT_LATENCY_VALUE             0
>> +#define RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT    ((int)(UINT32_MAX >> 1))
> Isn't it just INT32_MAX?
will fix it.
>
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * @param lcore_id
>> + *   target logical core id
>> + *
>> + * @param latency
>> + *   The resume latency in microseconds; must be greater than or equal to zero.
>> + *
>> + * @return
>> + *   0 on success. Otherwise negative value is returned.
>> + */
>> +__rte_experimental
>> +int rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency);
>> +
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * Get the current resume latency of this logical core.
>> + * The default value in the kernel is @see RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT
>> + * if it has not been set.
>> + *
>> + * @return
>> + *   Negative value on failure.
>> + *   >= 0 means the actual resume latency limit on this core.
>> + */
>> +__rte_experimental
>> +int rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id);
>> +
>> +#ifdef __cplusplus
>> +}
>> +#endif
>> +
>> +#endif /* RTE_POWER_QOS_H */
>> diff --git a/lib/power/version.map b/lib/power/version.map
>> index c9a226614e..08f178a39d 100644
>> --- a/lib/power/version.map
>> +++ b/lib/power/version.map
>> @@ -51,4 +51,8 @@ EXPERIMENTAL {
>>   	rte_power_set_uncore_env;
>>   	rte_power_uncore_freqs;
>>   	rte_power_unset_uncore_env;
>> +
>> +	# added in 24.11
>> +	rte_power_qos_get_cpu_resume_latency;
>> +	rte_power_qos_set_cpu_resume_latency;
>>   };
>> --
>> 2.22.0

^ permalink raw reply	[relevance 0%]
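
For readers new to the API under review, a minimal usage sketch based only
on the declarations quoted above (not part of the patch; error handling
trimmed, and the init/fini wrappers are assumed application code):

#include <rte_power_qos.h>

/* Request the shallowest idle state on a polling lcore, and restore
 * the previous limit on teardown. */
static int prev_latency;

static int
rx_lcore_init(uint16_t lcore_id)
{
	prev_latency = rte_power_qos_get_cpu_resume_latency(lcore_id);
	if (prev_latency < 0)
		return prev_latency;	/* sysfs read failed */
	return rte_power_qos_set_cpu_resume_latency(lcore_id,
			RTE_POWER_QOS_STRICT_LATENCY_VALUE);
}

static void
rx_lcore_fini(uint16_t lcore_id)
{
	rte_power_qos_set_cpu_resume_latency(lcore_id, prev_latency);
}

Saving and restoring the previous limit mirrors what the l3fwd-power patch
later in the series does ("restore to the original value as Konstantin
suggested", per the v13 changelog).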

* [PATCH v6 0/3] add ec points to sm2 op
@ 2024-10-22 19:05  3% Arkadiusz Kusztal
  2024-10-22 19:05  5% ` [PATCH v6 1/3] cryptodev: " Arkadiusz Kusztal
  2024-10-23  1:19  0% ` [PATCH v6 0/3] " Stephen Hemminger
  0 siblings, 2 replies; 169+ results
From: Arkadiusz Kusztal @ 2024-10-22 19:05 UTC (permalink / raw)
  To: dev; +Cc: gakhil, brian.dooley, Arkadiusz Kusztal

In the case when PMD cannot support the full process of the SM2,
but elliptic curve computation only, additional fields
are needed to handle such a case.

v2:
- rebased against the 24.11 code
v3:
- added feature flag
- added QAT patches
- added test patches
v4:
- replaced feature flag with capability
- split API patches
v5:
- rebased
- clarified usage of the partial flag
v6:
- removed already applied patch 1
- added ABI release notes comment
- removed camel case
- added flag reference

Arkadiusz Kusztal (3):
  cryptodev: add ec points to sm2 op
  crypto/qat: add sm2 encryption/decryption function
  app/test: add test sm2 C1/Kp test cases

 app/test/test_cryptodev_asym.c                | 138 ++++++++++++++++-
 app/test/test_cryptodev_sm2_test_vectors.h    | 112 +++++++++++++-
 doc/guides/cryptodevs/features/qat.ini        |   1 +
 doc/guides/rel_notes/release_24_11.rst        |   7 +
 .../common/qat/qat_adf/icp_qat_fw_mmp_ids.h   |   3 +
 drivers/common/qat/qat_adf/qat_pke.h          |  20 +++
 drivers/crypto/qat/qat_asym.c                 | 140 +++++++++++++++++-
 lib/cryptodev/rte_crypto_asym.h               |  56 +++++--
 8 files changed, 453 insertions(+), 24 deletions(-)

-- 
2.17.1


^ permalink raw reply	[relevance 3%]

* [PATCH v6 1/3] cryptodev: add ec points to sm2 op
  2024-10-22 19:05  3% [PATCH v6 0/3] add ec points to sm2 op Arkadiusz Kusztal
@ 2024-10-22 19:05  5% ` Arkadiusz Kusztal
  2024-10-23  1:19  0% ` [PATCH v6 0/3] " Stephen Hemminger
  1 sibling, 0 replies; 169+ results
From: Arkadiusz Kusztal @ 2024-10-22 19:05 UTC (permalink / raw)
  To: dev; +Cc: gakhil, brian.dooley, Arkadiusz Kusztal

In the case when PMD cannot support the full process of the SM2,
but elliptic curve computation only, additional fields
are needed to handle such a case.

Points C1, kP therefore were added to the SM2 crypto operation struct.

Signed-off-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
---
 doc/guides/rel_notes/release_24_11.rst |  3 ++
 lib/cryptodev/rte_crypto_asym.h        | 56 +++++++++++++++++++-------
 2 files changed, 45 insertions(+), 14 deletions(-)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index fa4822d928..0f91dae987 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -406,6 +406,9 @@ ABI Changes
   added new structure ``rte_node_xstats`` to ``rte_node_register`` and
   added ``xstat_off`` to ``rte_node``.
 
+* cryptodev: The ``rte_crypto_sm2_op_param`` struct member to hold ciphertext
+  is changed to a union data type. This change supports partial SM2 calculation.
+
 
 Known Issues
 ------------
diff --git a/lib/cryptodev/rte_crypto_asym.h b/lib/cryptodev/rte_crypto_asym.h
index aeb46e688e..f095cebcd0 100644
--- a/lib/cryptodev/rte_crypto_asym.h
+++ b/lib/cryptodev/rte_crypto_asym.h
@@ -646,6 +646,8 @@ enum rte_crypto_sm2_op_capa {
 	/**< Random number generator supported in SM2 ops. */
 	RTE_CRYPTO_SM2_PH,
 	/**< Prehash message before crypto op. */
+	RTE_CRYPTO_SM2_PARTIAL,
+	/**< Calculate elliptic curve points only. */
 };
 
 /**
@@ -673,20 +675,46 @@ struct rte_crypto_sm2_op_param {
 	 * will be overwritten by the PMD with the decrypted length.
 	 */
 
-	rte_crypto_param cipher;
-	/**<
-	 * Pointer to input data
-	 * - to be decrypted for SM2 private decrypt.
-	 *
-	 * Pointer to output data
-	 * - for SM2 public encrypt.
-	 * In this case the underlying array should have been allocated
-	 * with enough memory to hold ciphertext output (at least X bytes
-	 * for prime field curve of N bytes and for message M bytes,
-	 * where X = (C1 || C2 || C3) and computed based on SM2 RFC as
-	 * C1 (1 + N + N), C2 = M, C3 = N. The cipher.length field will
-	 * be overwritten by the PMD with the encrypted length.
-	 */
+	union {
+		rte_crypto_param cipher;
+		/**<
+		 * Pointer to input data
+		 * - to be decrypted for SM2 private decrypt.
+		 *
+		 * Pointer to output data
+		 * - for SM2 public encrypt.
+		 * In this case the underlying array should have been allocated
+		 * with enough memory to hold ciphertext output (at least X bytes
+		 * for prime field curve of N bytes and for message M bytes,
+		 * where X = (C1 || C2 || C3) and computed based on SM2 RFC as
+		 * C1 (1 + N + N), C2 = M, C3 = N. The cipher.length field will
+		 * be overwritten by the PMD with the encrypted length.
+		 */
+		struct {
+			struct rte_crypto_ec_point c1;
+			/**<
+			 * This field is used only when PMD does not support the full
+			 * process of the SM2 encryption/decryption, but the elliptic
+			 * curve part only.
+			 *
+			 * In the case of encryption, it is an output - point C1 = (x1,y1).
+			 * In the case of decryption, it is an input - point C1 = (x1,y1).
+			 *
+			 * Must be used along with the RTE_CRYPTO_SM2_PARTIAL flag.
+			 */
+			struct rte_crypto_ec_point kp;
+			/**<
+			 * This field is used only when PMD does not support the full
+			 * process of the SM2 encryption/decryption, but the elliptic
+			 * curve part only.
+			 *
+			 * It is an output in the encryption case, it is a point
+			 * [k]P = (x2,y2).
+			 *
+			 * Must be used along with the RTE_CRYPTO_SM2_PARTIAL flag.
+			 */
+		};
+	};
 
 	rte_crypto_uint id;
 	/**< The SM2 id used by signer and verifier. */
-- 
2.17.1


^ permalink raw reply	[relevance 5%]
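
To make the intended use of the new union concrete, here is a hedged sketch
of preparing an encrypt operation when the PMD advertises the
RTE_CRYPTO_SM2_PARTIAL capability. The helper name, the caller-allocated
buffers and the 32-byte coordinate size (SM2 prime field) are illustrative
assumptions, not taken from the patch:

#include <rte_crypto_asym.h>

/* Illustrative only: wire up an SM2 "partial" encrypt op. msg/msg_len is
 * the plaintext; c1x/c1y/kpx/kpy are caller-allocated 32-byte buffers
 * that the PMD fills with the output coordinates. */
static void
sm2_partial_encrypt_prepare(struct rte_crypto_asym_op *asym_op,
		uint8_t *msg, size_t msg_len,
		uint8_t *c1x, uint8_t *c1y, uint8_t *kpx, uint8_t *kpy)
{
	struct rte_crypto_sm2_op_param *sm2 = &asym_op->sm2;

	sm2->op_type = RTE_CRYPTO_ASYM_OP_ENCRYPT;
	sm2->message.data = msg;
	sm2->message.length = msg_len;

	/* With RTE_CRYPTO_SM2_PARTIAL the PMD computes only the EC points;
	 * deriving C2/C3 (KDF, hash) is left to the application. */
	sm2->c1.x.data = c1x;		/* output: x1 of C1 */
	sm2->c1.x.length = 32;
	sm2->c1.y.data = c1y;		/* output: y1 of C1 */
	sm2->c1.y.length = 32;
	sm2->kp.x.data = kpx;		/* output: x2 of [k]P */
	sm2->kp.x.length = 32;
	sm2->kp.y.data = kpy;		/* output: y2 of [k]P */
	sm2->kp.y.length = 32;
}

For decryption the direction of c1 flips: per the field comments above, the
application supplies C1 from the received ciphertext and the PMD returns kp.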

* Re: [PATCH v6 0/3] add ec points to sm2 op
  2024-10-22 19:05  3% [PATCH v6 0/3] add ec points to sm2 op Arkadiusz Kusztal
  2024-10-22 19:05  5% ` [PATCH v6 1/3] cryptodev: " Arkadiusz Kusztal
@ 2024-10-23  1:19  0% ` Stephen Hemminger
  1 sibling, 0 replies; 169+ results
From: Stephen Hemminger @ 2024-10-23  1:19 UTC (permalink / raw)
  To: Arkadiusz Kusztal; +Cc: dev, gakhil, brian.dooley

On Tue, 22 Oct 2024 20:05:57 +0100
Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com> wrote:

> In the case when PMD cannot support the full process of the SM2,
> but elliptic curve computation only, additional fields
> are needed to handle such a case.
> 
> v2:
> - rebased against the 24.11 code
> v3:
> - added feature flag
> - added QAT patches
> - added test patches
> v4:
> - replaced feature flag with capability
> - split API patches
> v5:
> - rebased
> - clarified usage of the partial flag
> v6:
> - removed already applied patch 1
> - added ABI relase notes comment
> - removed camel case
> - added flag reference
> 
> Arkadiusz Kusztal (3):
>   cryptodev: add ec points to sm2 op
>   crypto/qat: add sm2 encryption/decryption function
>   app/test: add test sm2 C1/Kp test cases
> 
>  app/test/test_cryptodev_asym.c                | 138 ++++++++++++++++-
>  app/test/test_cryptodev_sm2_test_vectors.h    | 112 +++++++++++++-
>  doc/guides/cryptodevs/features/qat.ini        |   1 +
>  doc/guides/rel_notes/release_24_11.rst        |   7 +
>  .../common/qat/qat_adf/icp_qat_fw_mmp_ids.h   |   3 +
>  drivers/common/qat/qat_adf/qat_pke.h          |  20 +++
>  drivers/crypto/qat/qat_asym.c                 | 140 +++++++++++++++++-
>  lib/cryptodev/rte_crypto_asym.h               |  56 +++++--
>  8 files changed, 453 insertions(+), 24 deletions(-)

There is an issue with the new feature missing from some of the doc templates.

$ ninja -C build doc
ninja: Entering directory `build'
[4/6] Generating doc/api/dts/dts_api_html with a custom command
Warning generate_overview_table(): Unknown feature 'SM2' in 'qat.ini'


^ permalink raw reply	[relevance 0%]

* [PATCH v12 0/3] power: introduce PM QoS interface
      2024-10-21 11:42  4% ` [PATCH v11 " Huisong Li
@ 2024-10-23  4:09  4% ` Huisong Li
  2024-10-23  4:09  5%   ` [PATCH v12 1/3] power: introduce PM QoS API on CPU wide Huisong Li
  2024-10-25  9:18  4% ` [PATCH v13 0/3] power: introduce PM QoS interface Huisong Li
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 169+ results
From: Huisong Li @ 2024-10-23  4:09 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay sensitive and expect a low
resume time, like interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used to set and get the resume latency limit on the cpuX for
userspace. Please see the description in kernel document[1].
Each cpuidle governor in Linux selects which idle state to enter based on
this CPU resume latency in its idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection and restrict it to the shallowest idle state to lower the delay
when waking up from idle state, by setting a strict resume latency (zero value).

[1] https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us

---
 v12:
  - add Acked-by Chengwen and Konstantin
  - fix overflow issue in l3fwd-power when parsing the command line
  - add a command parameter to set CPU resume latency
 v11:
  - operate the cpu id the lcore mapped by the new function
    power_get_lcore_mapped_cpu_id().
 v10:
  - replace LINE_MAX with a custom macro and fix two typos.
 v9:
  - move new feature description from release_24_07.rst to release_24_11.rst.
 v8:
  - update the latest code to resolve CI warning
 v7:
  - remove a dead code rte_lcore_is_enabled in patch[2/2]
 v6:
  - update release_24_07.rst based on dpdk repo to resolve CI warning.
 v5:
  - use LINE_MAX to replace BUFSIZ, and use snprintf to replace sprintf.
 v4:
  - fix some comments based on Stephen's review
  - add stdint.h include
  - add Acked-by Morten Brørup <mb@smartsharesystems.com>
 v3:
  - add RTE_POWER_xxx prefix for some macro in header
  - add the check for lcore_id with rte_lcore_is_enabled
 v2:
  - use PM QoS on CPU wide to replace the one on system wide

Huisong Li (3):
  power: introduce PM QoS API on CPU wide
  examples/l3fwd-power: fix data overflow when parse command line
  examples/l3fwd-power: add PM QoS configuration

 doc/guides/prog_guide/power_man.rst           |  19 +++
 doc/guides/rel_notes/release_24_11.rst        |   5 +
 .../sample_app_ug/l3_forward_power_man.rst    |   5 +-
 examples/l3fwd-power/main.c                   |  92 +++++++++++--
 lib/power/meson.build                         |   2 +
 lib/power/rte_power_qos.c                     | 123 ++++++++++++++++++
 lib/power/rte_power_qos.h                     |  73 +++++++++++
 lib/power/version.map                         |   4 +
 8 files changed, 308 insertions(+), 15 deletions(-)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

-- 
2.22.0


^ permalink raw reply	[relevance 4%]

* [PATCH v12 1/3] power: introduce PM QoS API on CPU wide
  2024-10-23  4:09  4% ` [PATCH v12 0/3] power: introduce PM QoS interface Huisong Li
@ 2024-10-23  4:09  5%   ` Huisong Li
  0 siblings, 0 replies; 169+ results
From: Huisong Li @ 2024-10-23  4:09 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay sensitive and expect a low
resume time, like interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used to set and get the resume latency limit on the cpuX for
userspace. Each cpuidle governor in Linux selects which idle state to enter
based on this CPU resume latency in its idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection and restrict it to the shallowest idle state to lower the delay
when waking up, by setting a strict resume latency (zero value).

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
---
 doc/guides/prog_guide/power_man.rst    |  19 ++++
 doc/guides/rel_notes/release_24_11.rst |   5 +
 lib/power/meson.build                  |   2 +
 lib/power/rte_power_qos.c              | 123 +++++++++++++++++++++++++
 lib/power/rte_power_qos.h              |  73 +++++++++++++++
 lib/power/version.map                  |   4 +
 6 files changed, 226 insertions(+)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
index f6674efe2d..91358b04f3 100644
--- a/doc/guides/prog_guide/power_man.rst
+++ b/doc/guides/prog_guide/power_man.rst
@@ -107,6 +107,25 @@ User Cases
 The power management mechanism is used to save power when performing L3 forwarding.
 
 
+PM QoS
+------
+
+The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
+interface is used to set and get the resume latency limit on the cpuX for
+userspace. Each cpuidle governor in Linux selects which idle state to enter
+based on this CPU resume latency in its idle task.
+
+The deeper the idle state, the lower the power consumption, but the longer
+the resume time. Some services are latency sensitive and expect a low
+resume time, like interrupt packet receiving mode.
+
+Applications can set and get the CPU resume latency by the
+``rte_power_qos_set_cpu_resume_latency()`` and ``rte_power_qos_get_cpu_resume_latency()``
+respectively. Applications can set a strict resume latency (zero value) by
+the ``rte_power_qos_set_cpu_resume_latency()`` to lower the resume latency and
+get better performance (though the power consumption of the platform may increase).
+
+
 Ethernet PMD Power Management API
 ---------------------------------
 
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index fa4822d928..d9e268274b 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -237,6 +237,11 @@ New Features
   This field is used to pass an extra configuration settings such as ability
   to lookup IPv4 addresses in network byte order.
 
+* **Introduce per-CPU PM QoS interface.**
+
+  * Add per-CPU PM QoS interface to lower the resume latency when waking up
+    from idle state.
+
 * **Added new API to register telemetry endpoint callbacks with private arguments.**
 
   A new ``rte_telemetry_register_cmd_arg`` function is available to pass an opaque value to
diff --git a/lib/power/meson.build b/lib/power/meson.build
index 2f0f3d26e9..9b5d3e8315 100644
--- a/lib/power/meson.build
+++ b/lib/power/meson.build
@@ -23,12 +23,14 @@ sources = files(
         'rte_power.c',
         'rte_power_uncore.c',
         'rte_power_pmd_mgmt.c',
+	'rte_power_qos.c',
 )
 headers = files(
         'rte_power.h',
         'rte_power_guest_channel.h',
         'rte_power_pmd_mgmt.h',
         'rte_power_uncore.h',
+	'rte_power_qos.h',
 )
 
 deps += ['timer', 'ethdev']
diff --git a/lib/power/rte_power_qos.c b/lib/power/rte_power_qos.c
new file mode 100644
index 0000000000..4dd0532b36
--- /dev/null
+++ b/lib/power/rte_power_qos.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_lcore.h>
+#include <rte_log.h>
+
+#include "power_common.h"
+#include "rte_power_qos.h"
+
+#define PM_QOS_SYSFILE_RESUME_LATENCY_US	\
+	"/sys/devices/system/cpu/cpu%u/power/pm_qos_resume_latency_us"
+
+#define PM_QOS_CPU_RESUME_LATENCY_BUF_LEN	32
+
+int
+rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	if (latency < 0) {
+		POWER_LOG(ERR, "latency should be greater than or equal to 0");
+		return -EINVAL;
+	}
+
+	ret = open_core_sysfs_file(&f, "w", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us under
+	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning
+	 * is as follows for different input string.
+	 * 1> the resume latency is 0 if the input is "n/a".
+	 * 2> the resume latency is no constraint if the input is "0".
+	 * 3> the resume latency is the actual value to be set.
+	 */
+	if (latency == RTE_POWER_QOS_STRICT_LATENCY_VALUE)
+		snprintf(buf, sizeof(buf), "%s", "n/a");
+	else if (latency == RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+		snprintf(buf, sizeof(buf), "%u", 0);
+	else
+		snprintf(buf, sizeof(buf), "%u", latency);
+
+	ret = write_core_sysfs_s(f, buf);
+	if (ret != 0)
+		POWER_LOG(ERR, "Failed to write "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+
+	fclose(f);
+
+	return ret;
+}
+
+int
+rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	int latency = -1;
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	ret = open_core_sysfs_file(&f, "r", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	ret = read_core_sysfs_s(f, buf, sizeof(buf));
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to read "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		goto out;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us under
+	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning
+	 * is as follows for different output string.
+	 * 1> the resume latency is 0 if the output is "n/a".
+	 * 2> the resume latency is no constraint if the output is "0".
+	 * 3> the resume latency is the actual value in use for other strings.
+	 */
+	if (strcmp(buf, "n/a") == 0)
+		latency = RTE_POWER_QOS_STRICT_LATENCY_VALUE;
+	else {
+		latency = strtoul(buf, NULL, 10);
+		latency = latency == 0 ? RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT : latency;
+	}
+
+out:
+	fclose(f);
+
+	return latency != -1 ? latency : ret;
+}
diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h
new file mode 100644
index 0000000000..7a8dab9272
--- /dev/null
+++ b/lib/power/rte_power_qos.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#ifndef RTE_POWER_QOS_H
+#define RTE_POWER_QOS_H
+
+#include <stdint.h>
+
+#include <rte_compat.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file rte_power_qos.h
+ *
+ * PM QoS API.
+ *
+ * The CPU-wide resume latency limit has a positive impact on this CPU's idle
+ * state selection in each cpuidle governor.
+ * Please see the PM QoS on CPU wide in the following link:
+ * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
+ *
+ * The deeper the idle state, the lower the power consumption, but the
+ * longer the resume time. Some services are delay sensitive and expect a
+ * low resume time, like interrupt packet receiving mode.
+ *
+ * In such cases, the per-CPU PM QoS API can be used to control this CPU's
+ * idle state selection and restrict it to the shallowest idle state to lower
+ * the delay after sleep, by setting a strict resume latency (zero value).
+ */
+
+#define RTE_POWER_QOS_STRICT_LATENCY_VALUE		0
+#define RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT	INT32_MAX
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param lcore_id
+ *   target logical core id
+ *
+ * @param latency
+ *   The resume latency in microseconds; must be greater than or equal to zero.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the current resume latency of this logical core.
+ * The default value in the kernel is @see RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT
+ * if it has not been set.
+ *
+ * @return
+ *   Negative value on failure.
+ *   >= 0 means the actual resume latency limit on this core.
+ */
+__rte_experimental
+int rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_POWER_QOS_H */
diff --git a/lib/power/version.map b/lib/power/version.map
index c9a226614e..08f178a39d 100644
--- a/lib/power/version.map
+++ b/lib/power/version.map
@@ -51,4 +51,8 @@ EXPERIMENTAL {
 	rte_power_set_uncore_env;
 	rte_power_uncore_freqs;
 	rte_power_unset_uncore_env;
+
+	# added in 24.11
+	rte_power_qos_get_cpu_resume_latency;
+	rte_power_qos_set_cpu_resume_latency;
 };
-- 
2.22.0


^ permalink raw reply	[relevance 5%]

* [PATCH v7 0/3] add ec points to sm2 op
@ 2024-10-23  8:19  3% Arkadiusz Kusztal
  2024-10-23  8:19  5% ` [PATCH v7 1/3] cryptodev: " Arkadiusz Kusztal
  0 siblings, 1 reply; 169+ results
From: Arkadiusz Kusztal @ 2024-10-23  8:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, brian.dooley, Arkadiusz Kusztal

In the case when PMD cannot support the full process of the SM2,
but elliptic curve computation only, additional fields
are needed to handle such a case.

v2:
- rebased against the 24.11 code
v3:
- added feature flag
- added QAT patches
- added test patches
v4:
- replaced feature flag with capability
- split API patches
v5:
- rebased
- clarified usage of the partial flag
v6:
- removed already applied patch 1
- added ABI release notes comment
- removed camel case
- added flag reference
v7:
- removed SM2 from auth features; in asym it was added by the SM2 ECDSA patch

Arkadiusz Kusztal (3):
  cryptodev: add ec points to sm2 op
  crypto/qat: add sm2 encryption/decryption function
  app/test: add test sm2 C1/Kp test cases

 app/test/test_cryptodev_asym.c                | 138 ++++++++++++++++-
 app/test/test_cryptodev_sm2_test_vectors.h    | 112 +++++++++++++-
 doc/guides/rel_notes/release_24_11.rst        |   7 +
 .../common/qat/qat_adf/icp_qat_fw_mmp_ids.h   |   3 +
 drivers/common/qat/qat_adf/qat_pke.h          |  20 +++
 drivers/crypto/qat/qat_asym.c                 | 140 +++++++++++++++++-
 lib/cryptodev/rte_crypto_asym.h               |  56 +++++--
 7 files changed, 452 insertions(+), 24 deletions(-)

-- 
2.17.1


^ permalink raw reply	[relevance 3%]

* [PATCH v7 1/3] cryptodev: add ec points to sm2 op
  2024-10-23  8:19  3% [PATCH v7 " Arkadiusz Kusztal
@ 2024-10-23  8:19  5% ` Arkadiusz Kusztal
  0 siblings, 0 replies; 169+ results
From: Arkadiusz Kusztal @ 2024-10-23  8:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, brian.dooley, Arkadiusz Kusztal

In the case when PMD cannot support the full process of the SM2,
but elliptic curve computation only, additional fields
are needed to handle such a case.

Points C1, kP therefore were added to the SM2 crypto operation struct.

Signed-off-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
---
 doc/guides/rel_notes/release_24_11.rst |  3 ++
 lib/cryptodev/rte_crypto_asym.h        | 56 +++++++++++++++++++-------
 2 files changed, 45 insertions(+), 14 deletions(-)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index fa4822d928..0f91dae987 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -406,6 +406,9 @@ ABI Changes
   added new structure ``rte_node_xstats`` to ``rte_node_register`` and
   added ``xstat_off`` to ``rte_node``.
 
+* cryptodev: The ``rte_crypto_sm2_op_param`` struct member to hold ciphertext
+  is changed to a union data type. This change supports partial SM2 calculation.
+
 
 Known Issues
 ------------
diff --git a/lib/cryptodev/rte_crypto_asym.h b/lib/cryptodev/rte_crypto_asym.h
index aeb46e688e..f095cebcd0 100644
--- a/lib/cryptodev/rte_crypto_asym.h
+++ b/lib/cryptodev/rte_crypto_asym.h
@@ -646,6 +646,8 @@ enum rte_crypto_sm2_op_capa {
 	/**< Random number generator supported in SM2 ops. */
 	RTE_CRYPTO_SM2_PH,
 	/**< Prehash message before crypto op. */
+	RTE_CRYPTO_SM2_PARTIAL,
+	/**< Calculate elliptic curve points only. */
 };
 
 /**
@@ -673,20 +675,46 @@ struct rte_crypto_sm2_op_param {
 	 * will be overwritten by the PMD with the decrypted length.
 	 */
 
-	rte_crypto_param cipher;
-	/**<
-	 * Pointer to input data
-	 * - to be decrypted for SM2 private decrypt.
-	 *
-	 * Pointer to output data
-	 * - for SM2 public encrypt.
-	 * In this case the underlying array should have been allocated
-	 * with enough memory to hold ciphertext output (at least X bytes
-	 * for prime field curve of N bytes and for message M bytes,
-	 * where X = (C1 || C2 || C3) and computed based on SM2 RFC as
-	 * C1 (1 + N + N), C2 = M, C3 = N. The cipher.length field will
-	 * be overwritten by the PMD with the encrypted length.
-	 */
+	union {
+		rte_crypto_param cipher;
+		/**<
+		 * Pointer to input data
+		 * - to be decrypted for SM2 private decrypt.
+		 *
+		 * Pointer to output data
+		 * - for SM2 public encrypt.
+		 * In this case the underlying array should have been allocated
+		 * with enough memory to hold ciphertext output (at least X bytes
+		 * for prime field curve of N bytes and for message M bytes,
+		 * where X = (C1 || C2 || C3) and computed based on SM2 RFC as
+		 * C1 (1 + N + N), C2 = M, C3 = N. The cipher.length field will
+		 * be overwritten by the PMD with the encrypted length.
+		 */
+		struct {
+			struct rte_crypto_ec_point c1;
+			/**<
+			 * This field is used only when PMD does not support the full
+			 * process of the SM2 encryption/decryption, but the elliptic
+			 * curve part only.
+			 *
+			 * In the case of encryption, it is an output - point C1 = (x1,y1).
+		 * In the case of decryption, it is an input - point C1 = (x1,y1).
+			 *
+			 * Must be used along with the RTE_CRYPTO_SM2_PARTIAL flag.
+			 */
+			struct rte_crypto_ec_point kp;
+			/**<
+			 * This field is used only when PMD does not support the full
+			 * process of the SM2 encryption/decryption, but the elliptic
+			 * curve part only.
+			 *
+			 * It is an output in the encryption case, it is a point
+			 * [k]P = (x2,y2).
+			 *
+			 * Must be used along with the RTE_CRYPTO_SM2_PARTIAL flag.
+			 */
+		};
+	};
 
 	rte_crypto_uint id;
 	/**< The SM2 id used by signer and verifier. */
-- 
2.17.1


^ permalink raw reply	[relevance 5%]

* [PATCH v27 14/14] doc: add release note about log library
  @ 2024-10-24  3:18  4%   ` Stephen Hemminger
  0 siblings, 0 replies; 169+ results
From: Stephen Hemminger @ 2024-10-24  3:18 UTC (permalink / raw)
  To: dev
  Cc: Stephen Hemminger, Morten Brørup, Bruce Richardson, Chengwen Feng

Significant enough to add some documentation.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
---
 doc/guides/rel_notes/release_24_11.rst | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index fa4822d928..ec4b7ba2a4 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -349,6 +349,25 @@ API Changes
   and replaced it with a new shared devarg ``llq_policy`` that keeps the same logic.
 
 
+* **Logging library changes**
+
+  * The log is initialized earlier in startup so all messages go through the library.
+
+  * Added a new option to timestamp log messages, which is useful for
+    debugging delays in application and driver startup.
+
+  * Syslog option change. If *--syslog* is specified, then messages
+    will go to syslog; if not specified then messages will only be displayed
+    on stderr. This option is now supported on FreeBSD (but not on Windows).
+
+  * If the application is a systemd service and the log output is being
+    sent to standard error, then DPDK will switch to the journal native protocol.
+
+  * Log messages can be timestamped with *--log-timestamp* option.
+
+  * Log messages can be colorized with the *--log-color* option.
+
+
 ABI Changes
 -----------
 
-- 
2.45.2


^ permalink raw reply	[relevance 4%]

* [PATCH v28 13/13] doc: add release note about log library
  @ 2024-10-24 19:02  4%   ` Stephen Hemminger
  0 siblings, 0 replies; 169+ results
From: Stephen Hemminger @ 2024-10-24 19:02 UTC (permalink / raw)
  To: dev
  Cc: Stephen Hemminger, Morten Brørup, Bruce Richardson, Chengwen Feng

Significant enough to add some documentation.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
---
 doc/guides/rel_notes/release_24_11.rst | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index fa4822d928..ec4b7ba2a4 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -349,6 +349,25 @@ API Changes
   and replaced it with a new shared devarg ``llq_policy`` that keeps the same logic.
 
 
+* **Logging library changes**
+
+  * The log is initialized earlier in startup so all messages go through the library.
+
+  * Added a new option to timestamp log messages, which is useful for
+    debugging delays in application and driver startup.
+
+  * Syslog option change. If *--syslog* is specified, then messages
+    will go to syslog; if not specified then messages will only be displayed
+    on stderr. This option is now supported on FreeBSD (but not on Windows).
+
+  * If the application is a systemd service and the log output is being
+    sent to standard error, then DPDK will switch to the journal native protocol.
+
+  * Log messages can be timestamped with *--log-timestamp* option.
+
+  * Log messages can be colorized with the *--log-color* option.
+
+
 ABI Changes
 -----------
 
-- 
2.45.2


^ permalink raw reply	[relevance 4%]

* [PATCH v13 0/3] power: introduce PM QoS interface
                     ` (2 preceding siblings ...)
  2024-10-23  4:09  4% ` [PATCH v12 0/3] power: introduce PM QoS interface Huisong Li
@ 2024-10-25  9:18  4% ` Huisong Li
  2024-10-25  9:18  5%   ` [PATCH v13 1/3] power: introduce PM QoS API on CPU wide Huisong Li
  2024-10-29 13:28  4% ` [PATCH v14 0/3] power: introduce PM QoS interface Huisong Li
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 169+ results
From: Huisong Li @ 2024-10-25  9:18 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay sensitive and expect a low
resume time, like interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used to set and get the resume latency limit on the cpuX for
userspace. Please see the description in kernel document[1].
Each cpuidle governor in Linux selects which idle state to enter based on
this CPU resume latency in its idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection and restrict it to the shallowest idle state to lower the delay
when waking up from idle state, by setting a strict resume latency (zero value).

[1] https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us

---
 v13:
  - disallow negative values for --cpu-resume-latency.
  - restore to the original value as Konstantin suggested.
 v12:
  - add Acked-by Chengwen and Konstantin
  - fix overflow issue in l3fwd-power when parsing the command line
  - add a command parameter to set CPU resume latency
 v11:
  - operate the cpu id the lcore mapped by the new function
    power_get_lcore_mapped_cpu_id().
 v10:
  - replace LINE_MAX with a custom macro and fix two typos.
 v9:
  - move new feature description from release_24_07.rst to release_24_11.rst.
 v8:
  - update the latest code to resolve CI warning
 v7:
  - remove a dead code rte_lcore_is_enabled in patch[2/2]
 v6:
  - update release_24_07.rst based on dpdk repo to resolve CI warning.
 v5:
  - use LINE_MAX to replace BUFSIZ, and use snprintf to replace sprintf.
 v4:
  - fix some comments based on Stephen's review
  - add stdint.h include
  - add Acked-by Morten Brørup <mb@smartsharesystems.com>
 v3:
  - add RTE_POWER_xxx prefix for some macro in header
  - add the check for lcore_id with rte_lcore_is_enabled
 v2:
  - use PM QoS on CPU wide to replace the one on system wide

Huisong Li (3):
  power: introduce PM QoS API on CPU wide
  examples/l3fwd-power: fix data overflow when parse command line
  examples/l3fwd-power: add PM QoS configuration

 doc/guides/prog_guide/power_man.rst           |  19 +++
 doc/guides/rel_notes/release_24_11.rst        |   5 +
 .../sample_app_ug/l3_forward_power_man.rst    |   5 +-
 examples/l3fwd-power/main.c                   | 115 ++++++++++++++--
 lib/power/meson.build                         |   2 +
 lib/power/rte_power_qos.c                     | 123 ++++++++++++++++++
 lib/power/rte_power_qos.h                     |  73 +++++++++++
 lib/power/version.map                         |   4 +
 8 files changed, 331 insertions(+), 15 deletions(-)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

-- 
2.22.0


^ permalink raw reply	[relevance 4%]
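
Patch 2/3 itself is not reproduced in this digest. As background on what
"fix data overflow when parse command line" refers to, a defensive pattern
for parsing a bounded numeric option such as --cpu-resume-latency could
look like the following sketch (illustrative only, not the actual patch;
the helper name is hypothetical):

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

/* Parse a non-negative int option, rejecting overflow and trailing
 * garbage; returns -1 on any malformed input. */
static int
parse_resume_latency(const char *arg)
{
	char *end = NULL;
	long v;

	errno = 0;
	v = strtol(arg, &end, 10);
	if (errno != 0 || end == arg || *end != '\0')
		return -1;
	if (v < 0 || v > INT_MAX)
		return -1;
	return (int)v;
}

Going through long and checking errno catches inputs that would silently
wrap a plain atoi()-style conversion.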

* [PATCH v13 1/3] power: introduce PM QoS API on CPU wide
  2024-10-25  9:18  4% ` [PATCH v13 0/3] power: introduce PM QoS interface Huisong Li
@ 2024-10-25  9:18  5%   ` Huisong Li
  2024-10-25 12:08  0%     ` Tummala, Sivaprasad
  0 siblings, 1 reply; 169+ results
From: Huisong Li @ 2024-10-25  9:18 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay sensitive and expect a low
resume time, like interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used to set and get the resume latency limit on the cpuX for
userspace. Each cpuidle governor in Linux selects which idle state to enter
based on this CPU resume latency in its idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection and restrict it to the shallowest idle state to lower the delay
when waking up, by setting a strict resume latency (zero value).

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
---
 doc/guides/prog_guide/power_man.rst    |  19 ++++
 doc/guides/rel_notes/release_24_11.rst |   5 +
 lib/power/meson.build                  |   2 +
 lib/power/rte_power_qos.c              | 123 +++++++++++++++++++++++++
 lib/power/rte_power_qos.h              |  73 +++++++++++++++
 lib/power/version.map                  |   4 +
 6 files changed, 226 insertions(+)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
index f6674efe2d..91358b04f3 100644
--- a/doc/guides/prog_guide/power_man.rst
+++ b/doc/guides/prog_guide/power_man.rst
@@ -107,6 +107,25 @@ User Cases
 The power management mechanism is used to save power when performing L3 forwarding.
 
 
+PM QoS
+------
+
+The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
+interface is used to set and get the resume latency limit on the cpuX for
+userspace. Each cpuidle governor in Linux selects which idle state to enter
+based on this CPU resume latency in its idle task.
+
+The deeper the idle state, the lower the power consumption, but the longer
+the resume time. Some services are latency sensitive and expect a low
+resume time, like interrupt packet receiving mode.
+
+Applications can set and get the CPU resume latency by the
+``rte_power_qos_set_cpu_resume_latency()`` and ``rte_power_qos_get_cpu_resume_latency()``
+respectively. Applications can set a strict resume latency (zero value) by
+the ``rte_power_qos_set_cpu_resume_latency()`` to lower the resume latency and
+get better performance (though the power consumption of the platform may increase).
+
+
 Ethernet PMD Power Management API
 ---------------------------------
 
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index fa4822d928..d9e268274b 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -237,6 +237,11 @@ New Features
   This field is used to pass an extra configuration settings such as ability
   to lookup IPv4 addresses in network byte order.
 
+* **Introduce per-CPU PM QoS interface.**
+
+  * Add per-CPU PM QoS interface to lower the resume latency when waking up
+    from idle state.
+
 * **Added new API to register telemetry endpoint callbacks with private arguments.**
 
   A new ``rte_telemetry_register_cmd_arg`` function is available to pass an opaque value to
diff --git a/lib/power/meson.build b/lib/power/meson.build
index 2f0f3d26e9..9b5d3e8315 100644
--- a/lib/power/meson.build
+++ b/lib/power/meson.build
@@ -23,12 +23,14 @@ sources = files(
         'rte_power.c',
         'rte_power_uncore.c',
         'rte_power_pmd_mgmt.c',
+	'rte_power_qos.c',
 )
 headers = files(
         'rte_power.h',
         'rte_power_guest_channel.h',
         'rte_power_pmd_mgmt.h',
         'rte_power_uncore.h',
+	'rte_power_qos.h',
 )
 
 deps += ['timer', 'ethdev']
diff --git a/lib/power/rte_power_qos.c b/lib/power/rte_power_qos.c
new file mode 100644
index 0000000000..4dd0532b36
--- /dev/null
+++ b/lib/power/rte_power_qos.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_lcore.h>
+#include <rte_log.h>
+
+#include "power_common.h"
+#include "rte_power_qos.h"
+
+#define PM_QOS_SYSFILE_RESUME_LATENCY_US	\
+	"/sys/devices/system/cpu/cpu%u/power/pm_qos_resume_latency_us"
+
+#define PM_QOS_CPU_RESUME_LATENCY_BUF_LEN	32
+
+int
+rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	if (latency < 0) {
+		POWER_LOG(ERR, "latency should be greater than or equal to 0");
+		return -EINVAL;
+	}
+
+	ret = open_core_sysfs_file(&f, "w", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us under
+	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning
+	 * is as follows for different input string.
+	 * 1> the resume latency is 0 if the input is "n/a".
+	 * 2> the resume latency is no constraint if the input is "0".
+	 * 3> the resume latency is the actual value to be set.
+	 */
+	if (latency == RTE_POWER_QOS_STRICT_LATENCY_VALUE)
+		snprintf(buf, sizeof(buf), "%s", "n/a");
+	else if (latency == RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+		snprintf(buf, sizeof(buf), "%u", 0);
+	else
+		snprintf(buf, sizeof(buf), "%u", latency);
+
+	ret = write_core_sysfs_s(f, buf);
+	if (ret != 0)
+		POWER_LOG(ERR, "Failed to write "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+
+	fclose(f);
+
+	return ret;
+}
+
+int
+rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	int latency = -1;
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	ret = open_core_sysfs_file(&f, "r", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	ret = read_core_sysfs_s(f, buf, sizeof(buf));
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to read "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		goto out;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us under
+	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning
+	 * is as follows for different output string.
+	 * 1> the resume latency is 0 if the output is "n/a".
+	 * 2> the resume latency is no constraint if the output is "0".
+	 * 3> the resume latency is the actual value in use for other strings.
+	 */
+	if (strcmp(buf, "n/a") == 0)
+		latency = RTE_POWER_QOS_STRICT_LATENCY_VALUE;
+	else {
+		latency = strtoul(buf, NULL, 10);
+		latency = latency == 0 ? RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT : latency;
+	}
+
+out:
+	fclose(f);
+
+	return latency != -1 ? latency : ret;
+}
diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h
new file mode 100644
index 0000000000..7a8dab9272
--- /dev/null
+++ b/lib/power/rte_power_qos.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#ifndef RTE_POWER_QOS_H
+#define RTE_POWER_QOS_H
+
+#include <stdint.h>
+
+#include <rte_compat.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file rte_power_qos.h
+ *
+ * PM QoS API.
+ *
+ * The CPU-wide resume latency limit has a positive impact on this CPU's idle
+ * state selection in each cpuidle governor.
+ * Please see the PM QoS on CPU wide in the following link:
+ * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
+ *
+ * The deeper the idle state, the lower the power consumption, but the
+ * longer the resume time. Some services are delay sensitive and expect a
+ * low resume time, like interrupt packet receiving mode.
+ *
+ * In such cases, the per-CPU PM QoS API can be used to control this CPU's
+ * idle state selection and restrict it to the shallowest idle state to lower
+ * the delay after sleep, by setting a strict resume latency (zero value).
+ */
+
+#define RTE_POWER_QOS_STRICT_LATENCY_VALUE		0
+#define RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT	INT32_MAX
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param lcore_id
+ *   target logical core id
+ *
+ * @param latency
+ *   The resume latency in microseconds; must be greater than or equal to zero.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the current resume latency of this logical core.
+ * The default value in the kernel is @see RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT
+ * if it has not been set.
+ *
+ * @return
+ *   Negative value on failure.
+ *   >= 0 means the actual resume latency limit on this core.
+ */
+__rte_experimental
+int rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_POWER_QOS_H */
diff --git a/lib/power/version.map b/lib/power/version.map
index c9a226614e..08f178a39d 100644
--- a/lib/power/version.map
+++ b/lib/power/version.map
@@ -51,4 +51,8 @@ EXPERIMENTAL {
 	rte_power_set_uncore_env;
 	rte_power_uncore_freqs;
 	rte_power_unset_uncore_env;
+
+	# added in 24.11
+	rte_power_qos_get_cpu_resume_latency;
+	rte_power_qos_set_cpu_resume_latency;
 };
-- 
2.22.0


^ permalink raw reply	[relevance 5%]

* RE: [PATCH v13 1/3] power: introduce PM QoS API on CPU wide
  2024-10-25  9:18  5%   ` [PATCH v13 1/3] power: introduce PM QoS API on CPU wide Huisong Li
@ 2024-10-25 12:08  0%     ` Tummala, Sivaprasad
  0 siblings, 0 replies; 169+ results
From: Tummala, Sivaprasad @ 2024-10-25 12:08 UTC (permalink / raw)
  To: Huisong Li, dev
  Cc: mb, thomas, Yigit, Ferruh, anatoly.burakov, david.hunt, stephen,
	konstantin.ananyev, david.marchand, fengchengwen, liuyonglong

Hi Huisong,

LGTM! One comment: please update the doxygen documentation for the new APIs.

> -----Original Message-----
> From: Huisong Li <lihuisong@huawei.com>
> Sent: Friday, October 25, 2024 2:49 PM
> To: dev@dpdk.org
> Cc: mb@smartsharesystems.com; thomas@monjalon.net; Yigit, Ferruh
> <Ferruh.Yigit@amd.com>; anatoly.burakov@intel.com; david.hunt@intel.com;
> Tummala, Sivaprasad <Sivaprasad.Tummala@amd.com>;
> stephen@networkplumber.org; konstantin.ananyev@huawei.com;
> david.marchand@redhat.com; fengchengwen@huawei.com;
> liuyonglong@huawei.com; lihuisong@huawei.com
> Subject: [PATCH v13 1/3] power: introduce PM QoS API on CPU wide
>
>
> The deeper the idle state, the lower the power consumption, but the longer the
> resume time. Some services are delay sensitive and expect a low resume
> time, like interrupt packet receiving mode.
>
> And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
> interface is used by userspace to set and get the resume latency limit on the cpuX.
> Each cpuidle governor in Linux selects which idle state to enter based on this CPU
> resume latency in its idle task.
>
> The per-CPU PM QoS API can be used to control this CPU's idle state selection,
> limiting it to the shallowest idle state to lower the wakeup delay, by
> setting a strict resume latency (zero value).
>
> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> ---
>  doc/guides/prog_guide/power_man.rst    |  19 ++++
>  doc/guides/rel_notes/release_24_11.rst |   5 +
>  lib/power/meson.build                  |   2 +
>  lib/power/rte_power_qos.c              | 123 +++++++++++++++++++++++++
>  lib/power/rte_power_qos.h              |  73 +++++++++++++++
>  lib/power/version.map                  |   4 +
>  6 files changed, 226 insertions(+)
>  create mode 100644 lib/power/rte_power_qos.c  create mode 100644
> lib/power/rte_power_qos.h
>
> diff --git a/doc/guides/prog_guide/power_man.rst
> b/doc/guides/prog_guide/power_man.rst
> index f6674efe2d..91358b04f3 100644
> --- a/doc/guides/prog_guide/power_man.rst
> +++ b/doc/guides/prog_guide/power_man.rst
> @@ -107,6 +107,25 @@ User Cases
>  The power management mechanism is used to save power when performing L3
> forwarding.
>
>
> +PM QoS
> +------
> +
> +The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
> +interface is used to set and get the resume latency limit on the cpuX
> +for userspace. Each cpuidle governor in Linux select which idle state
> +to enter based on this CPU resume latency in their idle task.
> +
> +The deeper the idle state, the lower the power consumption, but the
> +longer the resume time. Some service are latency sensitive and very
> +except the low resume time, like interrupt packet receiving mode.
> +
> +Applications can set and get the CPU resume latency by the
> +``rte_power_qos_set_cpu_resume_latency()`` and
> +``rte_power_qos_get_cpu_resume_latency()``
> +respectively. Applications can set a strict resume latency (zero value)
> +by the ``rte_power_qos_set_cpu_resume_latency()`` to low the resume
> +latency and get better performance (instead, the power consumption of platform
> may increase).
> +
> +
>  Ethernet PMD Power Management API
>  ---------------------------------
>
> diff --git a/doc/guides/rel_notes/release_24_11.rst
> b/doc/guides/rel_notes/release_24_11.rst
> index fa4822d928..d9e268274b 100644
> --- a/doc/guides/rel_notes/release_24_11.rst
> +++ b/doc/guides/rel_notes/release_24_11.rst
> @@ -237,6 +237,11 @@ New Features
>    This field is used to pass an extra configuration settings such as ability
>    to lookup IPv4 addresses in network byte order.
>
> +* **Introduce per-CPU PM QoS interface.**
> +
> +  * Add per-CPU PM QoS interface to lower the resume latency when waking up
> +    from idle state.
> +
>  * **Added new API to register telemetry endpoint callbacks with private
> arguments.**
>
>    A new ``rte_telemetry_register_cmd_arg`` function is available to pass an opaque
> value to diff --git a/lib/power/meson.build b/lib/power/meson.build index
> 2f0f3d26e9..9b5d3e8315 100644
> --- a/lib/power/meson.build
> +++ b/lib/power/meson.build
> @@ -23,12 +23,14 @@ sources = files(
>          'rte_power.c',
>          'rte_power_uncore.c',
>          'rte_power_pmd_mgmt.c',
> +       'rte_power_qos.c',
>  )
>  headers = files(
>          'rte_power.h',
>          'rte_power_guest_channel.h',
>          'rte_power_pmd_mgmt.h',
>          'rte_power_uncore.h',
> +       'rte_power_qos.h',
>  )
>
>  deps += ['timer', 'ethdev']
> diff --git a/lib/power/rte_power_qos.c b/lib/power/rte_power_qos.c new file mode
> 100644 index 0000000000..4dd0532b36
> --- /dev/null
> +++ b/lib/power/rte_power_qos.c
> @@ -0,0 +1,123 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2024 HiSilicon Limited
> + */
> +
> +#include <errno.h>
> +#include <stdlib.h>
> +#include <string.h>
> +
> +#include <rte_lcore.h>
> +#include <rte_log.h>
> +
> +#include "power_common.h"
> +#include "rte_power_qos.h"
> +
> +#define PM_QOS_SYSFILE_RESUME_LATENCY_US       \
> +       "/sys/devices/system/cpu/cpu%u/power/pm_qos_resume_latency_us"
> +
> +#define PM_QOS_CPU_RESUME_LATENCY_BUF_LEN      32
> +
> +int
> +rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency) {
> +       char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
> +       uint32_t cpu_id;
> +       FILE *f;
> +       int ret;
> +
> +       if (!rte_lcore_is_enabled(lcore_id)) {
> +               POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
> +               return -EINVAL;
> +       }
> +       ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
> +       if (ret != 0)
> +               return ret;
> +
> +       if (latency < 0) {
> +               POWER_LOG(ERR, "latency should be greater than and equal to 0");
> +               return -EINVAL;
> +       }
> +
> +       ret = open_core_sysfs_file(&f, "w",
> PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
> +       if (ret != 0) {
> +               POWER_LOG(ERR, "Failed to open
> "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
> +                         cpu_id, strerror(errno));
> +               return ret;
> +       }
> +
> +       /*
> +        * Based on the sysfs interface pm_qos_resume_latency_us under the
> +        * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in the kernel, the
> +        * meaning of the different input strings is as follows.
> +        * 1> the resume latency is 0 if the input is "n/a".
> +        * 2> the resume latency has no constraint if the input is "0".
> +        * 3> the resume latency is the actual value to be set.
> +        */
> +       if (latency == RTE_POWER_QOS_STRICT_LATENCY_VALUE)
> +               snprintf(buf, sizeof(buf), "%s", "n/a");
> +       else if (latency ==
> RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT)
> +               snprintf(buf, sizeof(buf), "%u", 0);
> +       else
> +               snprintf(buf, sizeof(buf), "%u", latency);
> +
> +       ret = write_core_sysfs_s(f, buf);
> +       if (ret != 0)
> +               POWER_LOG(ERR, "Failed to write
> "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
> +                         cpu_id, strerror(errno));
> +
> +       fclose(f);
> +
> +       return ret;
> +}
> +
> +int
> +rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id) {
> +       char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
> +       int latency = -1;
> +       uint32_t cpu_id;
> +       FILE *f;
> +       int ret;
> +
> +       if (!rte_lcore_is_enabled(lcore_id)) {
> +               POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
> +               return -EINVAL;
> +       }
> +       ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
> +       if (ret != 0)
> +               return ret;
> +
> +       ret = open_core_sysfs_file(&f, "r",
> PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
> +       if (ret != 0) {
> +               POWER_LOG(ERR, "Failed to open
> "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
> +                         cpu_id, strerror(errno));
> +               return ret;
> +       }
> +
> +       ret = read_core_sysfs_s(f, buf, sizeof(buf));
> +       if (ret != 0) {
> +               POWER_LOG(ERR, "Failed to read
> "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
> +                         cpu_id, strerror(errno));
> +               goto out;
> +       }
> +
> +       /*
> +        * Based on the sysfs interface pm_qos_resume_latency_us under the
> +        * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in the kernel, the
> +        * meaning of the different output strings is as follows.
> +        * 1> the resume latency is 0 if the output is "n/a".
> +        * 2> the resume latency has no constraint if the output is "0".
> +        * 3> the resume latency is the actual value in use for any other string.
> +        */
> +       if (strcmp(buf, "n/a") == 0)
> +               latency = RTE_POWER_QOS_STRICT_LATENCY_VALUE;
> +       else {
> +               latency = strtoul(buf, NULL, 10);
> +               latency = latency == 0 ?
> RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT : latency;
> +       }
> +
> +out:
> +       fclose(f);
> +
> +       return latency != -1 ? latency : ret; }
> diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h new file mode
> 100644 index 0000000000..7a8dab9272
> --- /dev/null
> +++ b/lib/power/rte_power_qos.h
> @@ -0,0 +1,73 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2024 HiSilicon Limited
> + */
> +
> +#ifndef RTE_POWER_QOS_H
> +#define RTE_POWER_QOS_H
> +
> +#include <stdint.h>
> +
> +#include <rte_compat.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/**
> + * @file rte_power_qos.h
> + *
> + * PM QoS API.
> + *
> + * The CPU-wide resume latency limit directly influences this CPU's idle
> + * state selection in each cpuidle governor.
> + * Please see the per-CPU PM QoS description at the following link:
> + * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
> + *
> + * The deeper the idle state, the lower the power consumption, but the
> + * longer the resume time. Some services are delay sensitive and expect a
> + * low resume time, like interrupt packet receiving mode.
> + *
> + * In such cases, the per-CPU PM QoS API can be used to control this CPU's
> + * idle state selection, limiting it to the shallowest idle state to lower
> + * the wakeup delay, by setting a strict resume latency (zero value).
> + */
> +
> +#define RTE_POWER_QOS_STRICT_LATENCY_VALUE             0
> +#define RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT
> INT32_MAX
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * @param lcore_id
> + *   target logical core id
> + *
> + * @param latency
> + *   The latency value, in microseconds, must be greater than or equal to zero.
> + *
> + * @return
> + *   0 on success. Otherwise negative value is returned.
> + */
> +__rte_experimental
> +int rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int
> +latency);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the current resume latency of this logical core.
> + * The kernel default is @see RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT
> + * if it has not been set.
> + *
> + * @return
> + *   Negative value on failure.
> + *   >= 0 means the actual resume latency limit on this core.
> + */
> +__rte_experimental
> +int rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* RTE_POWER_QOS_H */
> diff --git a/lib/power/version.map b/lib/power/version.map index
> c9a226614e..08f178a39d 100644
> --- a/lib/power/version.map
> +++ b/lib/power/version.map
> @@ -51,4 +51,8 @@ EXPERIMENTAL {
>         rte_power_set_uncore_env;
>         rte_power_uncore_freqs;
>         rte_power_unset_uncore_env;
> +
> +       # added in 24.11
> +       rte_power_qos_get_cpu_resume_latency;
> +       rte_power_qos_set_cpu_resume_latency;
>  };
> --
> 2.22.0

Acked-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>

^ permalink raw reply	[relevance 0%]

* [PATCH v29 13/13] doc: add release note about log library
  @ 2024-10-25 21:45  4%   ` Stephen Hemminger
  0 siblings, 0 replies; 169+ results
From: Stephen Hemminger @ 2024-10-25 21:45 UTC (permalink / raw)
  To: dev
  Cc: Stephen Hemminger, Morten Brørup, Bruce Richardson, Chengwen Feng

Significant enough to warrant a release note.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
---
 doc/guides/rel_notes/release_24_11.rst | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index fa4822d928..1d2e60231b 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -349,6 +349,26 @@ API Changes
   and replaced it with a new shared devarg ``llq_policy`` that keeps the same logic.
 
 
+* **Logging library changes**
+
+  * The log is initialized earlier in startup so all messages go through the library.
+
+  * Added a new option to timestamp log messages, which is useful for
+    debugging delays in application and driver startup.
+
+  * If the application is a systemd service and the log output is being
+    sent to standard error, then DPDK will switch to the journal native protocol.
+    This allows more data, such as severity, to be sent.
+
+  * The syslog option has changed. By default, messages are no longer sent
+    to syslog unless the *--syslog* option is specified.
+    Syslog is also now supported on FreeBSD (but not on Windows).
+
+  * Log messages can be timestamped with *--log-timestamp* option.
+
+  * Log messages can be colorized with the *--log-color* option.
+
+
 ABI Changes
 -----------
 
-- 
2.45.2
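
As an illustration of the new options (a hypothetical snippet; applications
normally receive these flags from the shell rather than hard-coding them, and
the application name is made up):

	#include <stdlib.h>
	#include <rte_eal.h>
	#include <rte_common.h>
	#include <rte_debug.h>

	int main(void)
	{
		/* enable timestamped, colorized log output via EAL arguments */
		char *argv[] = { "dpdk-app", "--log-timestamp", "--log-color" };
		int ret = rte_eal_init((int)RTE_DIM(argv), argv);

		if (ret < 0)
			rte_exit(EXIT_FAILURE, "EAL init failed\n");
		return 0;
	}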


^ permalink raw reply	[relevance 4%]

* Re: [PATCH RESEND v7 0/5] app/testpmd: support multiple process attach and detach port
  2024-10-18  2:48  0%       ` lihuisong (C)
@ 2024-10-26  4:11  0%         ` lihuisong (C)
  2024-10-29 22:12  0%         ` Ferruh Yigit
  1 sibling, 0 replies; 169+ results
From: lihuisong (C) @ 2024-10-26  4:11 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: dev, fengchengwen, liuyonglong, thomas, andrew.rybchenko

Hi Ferruh,


On 2024/10/18 10:48, lihuisong (C) wrote:
> Hi Ferruh,
>
> Thanks for considering this again. Please see my reply inline.
>
> On 2024/10/18 9:04, Ferruh Yigit wrote:
>> On 10/8/2024 3:32 AM, lihuisong (C) wrote:
>>> Hi Thomas and Ferruh,
>>>
>>> We've discussed it on and off a few times, and we've reached some
>>> consensus.
>>> This has been going on for more than 2 years😅
>>> Can you have a look at this series again?
>>> If we really don't need it, I will drop it from my upstreaming list.
>>>
>> Hi Huisong,
>>
>> I was not really convinced by the patch series, but did not want to
>> block it outright; sorry that this caused the patch series to stay around.
>>
>> Having checked again, it still feels like it adds unnecessary complexity,
>> and I am for rejecting this series.
>>
>> The overall target is to be able to support hotplug with primary/secondary
>> processes, and the series uses event handlers for this, but that requires
>> adding a new ethdev state to be able to iterate over devices etc...
>> Perhaps there is a better way to support this without relying on event handlers.
> Ignoring the modification of testpmd is ok to me.
> But we need to restrict testpmd from supporting attach and detach port
> in the multiple process case.
> Otherwise, the issues this series solves will be encountered.
>
> BTW, I want to say the patch [2/5] which introduced
> RTE_ETH_DEV_ALLOCATED should be considered again,
> because it addresses a real issue in the ethdev layer. This is also
> the outcome that Thomas, you and I discussed before.
> Please look at this patch again.
Can you please take a look at my above reply?
>
> /Huisong
>>
>>
>>> /Huisong
>>>
>>>
>>> On 2024/9/29 13:52, Huisong Li wrote:
>>>> This patchset fixes some bugs and supports attaching and detaching ports
>>>> in primary and secondary processes.
>>>>
>>>> ---
>>>>    -v7: fix conflicts
>>>>    -v6: adjust rte_eth_dev_is_used position based on alphabetical 
>>>> order
>>>>         in version.map
>>>>    -v5: move 'ALLOCATED' state to the back of 'REMOVED' to avoid abi
>>>> break.
>>>>    -v4: fix a misspelling.
>>>>    -v3:
>>>>      #1 merge patch 1/6 and patch 2/6 into patch 1/5, and add 
>>>> modification
>>>>         for other bus type.
>>>>      #2 add a RTE_ETH_DEV_ALLOCATED state in rte_eth_dev_state to 
>>>> resolve
>>>>         the probelm in patch 2/5.
>>>>    -v2: resend due to CI unexplained failure.
>>>>
>>>> Huisong Li (5):
>>>>     drivers/bus: restore driver assignment at front of probing
>>>>     ethdev: fix skip valid port in probing callback
>>>>     app/testpmd: check the validity of the port
>>>>     app/testpmd: add attach and detach port for multiple process
>>>>     app/testpmd: stop forwarding in new or destroy event
>>>>
>>>>    app/test-pmd/testpmd.c                   | 47 
>>>> +++++++++++++++---------
>>>>    app/test-pmd/testpmd.h                   |  1 -
>>>>    drivers/bus/auxiliary/auxiliary_common.c |  9 ++++-
>>>>    drivers/bus/dpaa/dpaa_bus.c              |  9 ++++-
>>>>    drivers/bus/fslmc/fslmc_bus.c            |  8 +++-
>>>>    drivers/bus/ifpga/ifpga_bus.c            | 12 ++++--
>>>>    drivers/bus/pci/pci_common.c             |  9 ++++-
>>>>    drivers/bus/vdev/vdev.c                  | 10 ++++-
>>>>    drivers/bus/vmbus/vmbus_common.c         |  9 ++++-
>>>>    drivers/net/bnxt/bnxt_ethdev.c           |  3 +-
>>>>    drivers/net/bonding/bonding_testpmd.c    |  1 -
>>>>    drivers/net/mlx5/mlx5.c                  |  2 +-
>>>>    lib/ethdev/ethdev_driver.c               | 13 +++++--
>>>>    lib/ethdev/ethdev_driver.h               | 12 ++++++
>>>>    lib/ethdev/ethdev_pci.h                  |  2 +-
>>>>    lib/ethdev/rte_class_eth.c               |  2 +-
>>>>    lib/ethdev/rte_ethdev.c                  |  4 +-
>>>>    lib/ethdev/rte_ethdev.h                  |  4 +-
>>>>    lib/ethdev/version.map                   |  1 +
>>>>    19 files changed, 114 insertions(+), 44 deletions(-)
>>>>
>> .

^ permalink raw reply	[relevance 0%]

* [PATCH v30 13/13] doc: add release note about log library
  @ 2024-10-27 17:24  4%   ` Stephen Hemminger
  0 siblings, 0 replies; 169+ results
From: Stephen Hemminger @ 2024-10-27 17:24 UTC (permalink / raw)
  To: dev
  Cc: Stephen Hemminger, Morten Brørup, Bruce Richardson, Chengwen Feng

Significant enough to warrant a release note.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
---
 doc/guides/rel_notes/release_24_11.rst | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 53a5ffebe5..b96042ea14 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -353,6 +353,26 @@ API Changes
   and replaced it with a new shared devarg ``llq_policy`` that keeps the same logic.
 
 
+* **Logging library changes**
+
+  * The log is initialized earlier in startup so all messages go through the library.
+
+  * Added a new option to timestamp log messages, which is useful for
+    debugging delays in application and driver startup.
+
+  * If the application is a systemd service and the log output is being
+    sent to standard error, then DPDK will switch to the journal native protocol.
+    This allows more data, such as severity, to be sent.
+
+  * The syslog option has changed. By default, messages are no longer sent
+    to syslog unless the *--syslog* option is specified.
+    Syslog is also supported on FreeBSD (but not on Windows).
+
+  * Log messages can be timestamped with *--log-timestamp* option.
+
+  * Log messages can be colorized with the *--log-color* option.
+
+
 ABI Changes
 -----------
 
-- 
2.45.2


^ permalink raw reply	[relevance 4%]

* Re: [PATCH] [RFC] cryptodev: replace LIST_END enumerators with APIs
  @ 2024-10-28 10:12  4%       ` Dodji Seketeli
  0 siblings, 0 replies; 169+ results
From: Dodji Seketeli @ 2024-10-28 10:12 UTC (permalink / raw)
  To: Ferruh Yigit
  Cc: Akhil Goyal, dev, thomas, david.marchand, hemant.agrawal, anoobj,
	pablo.de.lara.guarch, fiona.trahe, declan.doherty, matan,
	g.singh, fanzhang.oss, jianjay.zhou, asomalap, ruifeng.wang,
	konstantin.v.ananyev, radu.nicolau, ajit.khaparde, rnagadheeraj,
	mdr

Hello,

Ferruh Yigit <ferruh.yigit@amd.com> writes:

[...]

>> This change causes the value of the FOO_END enumerator to increase.
>> And that increase might be problematic.  At the moment, whether it is
>> problematic or not has to be the result of a careful review.
>> 
>
> As you said, FOOD_END value change can be sometimes problematic, but
> sometimes it is not.
> This what I referred as limitation that tool is not reporting only
> problematic case, but require human review.

Oh, I see. Thank you for clarifying.

> (btw, this is a very useful tool, I don't want to sound negative
> about it, I only want to address this recurring subject in dpdk.)

No problem, I never assume you mean anything negative :-)

[...]


>> So, by default, abidiff will complain by saying that the value of
>> FOO_END was changed.
>> 
>> But you, as a community of practice, can decide that this kind of change
>> to the value of the last enumerator is not a problematic change, after
>> careful review of your code and practice.  You can thus tell
>> the tool, using a suppression specification which has the following
>> syntax:
>> 
>>     [suppress_type]
>>       type_kind = enum
>>       changed_enumerators = FOO_END, ANOTHER_ENUM_END, AND_ANOTHER_ENUM_END
>> 
>> or, alternatively, you can specify the enumerators you want to suppress
>> the changes for as a list of regular expressions:
>> 
>>     [suppress_type]
>>       type_kind = enum
>>       changed_enumerators_regexp = .*_END$, .*_LAST$, .*_LIST_END$
>> 
>> Wouldn't that be enough to address your use case here (honest question)?
>> 
>
> We are already using the suppression feature in dpdk.
>
> But the difficulty is to decide whether an END (MAX) enum value change
> warning is an actual ABI break or not.
>
> When the tool gives a warning, the tendency is to just make the warning go
> away, mostly by removing the END (MAX) enum without really analyzing if it
> is a real ABI break.

I see.

[...]

>>> [1] It would be better if the tool gave END (MAX) enum value warnings only
>>> when it is exchanged in an API, but I am not sure whether this is possible
>>> to detect.
>> 
>> I believe that if you want to know if an enumerator value is *USED* by a
>> type (which I believe is at the root of what you are alluding to), then
>> you would need a static analysis tool that works at the source level.
>> Or, you need a human review of the code once the binary analysis tool
>> told you that that value of the enumerator changed.
>> 
>> Why ? please let me give you an example:
>> 
>>     enum foo_enum
>>     {
>>      FOO_FIRST,
>>      FOO_SECOND,
>>      FOO_END
>>     };
>> 
>>     int array[FOO_END];
>> 
>> Once this is compiled into binary, what libabigail is going to see by
>> analyzing the binary is that 'array' is an array of 2 integers.  The
>> information about the size of the array being initially an enumerator
>> value is lost.  To detect that, you need source level analysis.
>> 
>
> I see the problem.
>
> Is this the main reason that a change to the FOO_END value is reported as a warning?

Yes, it is because of this class of issues.

Actually if ANY enumerator value is changed, that is actually an ABI
change.  And that ABI change is either compatible or not.

> If we forbid this kind of usage of the FOO_END, can we ignore this
> warning safely?

I would think so.


But then, you'd have to also forbid the use of all enumerators,
basically.  I am not sure that would be practical.

Rather I would tend to lean toward reviewing the use of enumerators, on
a case by case basis, using tools like 'grep' and whatnot.

What I would advise to forbid is the use of complicated macros or
constructs that make the review of the use of enumerators
impractical.  You should be able to grep "FOO_END" and see where it's
used in the source code.  Reviewing that shouldn't take more than a few
minutes whenever a tool warns about the change of its value.

>
>> But then, by reviewing the code, this is a construct that you can spot
>> and allow or forbid, depending on your goals as a project.
>> 
>> [...]
>> 
>> Cheers,
>> 
>

-- 
		Dodji


^ permalink raw reply	[relevance 4%]

* Re: [EXTERNAL] Re: [PATCH] [RFC] cryptodev: replace LIST_END enumerators with APIs
  @ 2024-10-28 10:55  4%       ` Dodji Seketeli
  0 siblings, 0 replies; 169+ results
From: Dodji Seketeli @ 2024-10-28 10:55 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Dodji Seketeli, Ferruh Yigit, dev, thomas, david.marchand,
	hemant.agrawal, Anoob Joseph, pablo.de.lara.guarch, fiona.trahe,
	declan.doherty, matan, g.singh, fanzhang.oss, jianjay.zhou,
	asomalap, ruifeng.wang, konstantin.v.ananyev, radu.nicolau,
	ajit.khaparde, Nagadheeraj Rottela, mdr

Hello,

Akhil Goyal <gakhil@marvell.com> writes:

[...]


>> I believe that if you want to know if an enumerator value is *USED* by a
>> type (which I believe is at the root of what you are alluding to), then
>> you would need a static analysis tool that works at the source level.
>> Or, you need a human review of the code once the binary analysis tool
>> told you that that value of the enumerator changed.
>> 
>> Why ? please let me give you an example:
>> 
>>     enum foo_enum
>>     {
>>      FOO_FIRST,
>>      FOO_SECOND,
>>      FOO_END
>>     };
>> 
>>     int array[FOO_END];
>> 
>> Once this is compiled into binary, what libabigail is going to see by
>> analyzing the binary is that 'array' is an array of 2 integers.  The
>> information about the size of the array being initially an enumerator
>> value is lost.  To detect that, you need source level analysis.
>> 
>> But then, by reviewing the code, this is a construct that you can spot
>> and allow or forbid, depending on your goals as a project.
>> 
> In the above example, if FOO_THIRD is added in a newer library,
> the FOO_END value will change and will cause an ABI break due to the change in an existing value.
> But we could replace it with an inline function to get the list end and use it in the array like below.
> Then if FOO_THIRD is added, we would also update the foo_enum_list_end() function to return (FOO_THIRD + 1).
>
>      enum foo_enum
>      {
>       FOO_FIRST,
>       FOO_SECOND,
>      };
>      static inline int foo_enum_list_end()
>      {
>             return FOO_SECOND + 1;
>      }
>      int array[foo_enum_list_end()];
>
> Will this cause an ABI break if we add this array in the application or in the library?

I think this (inline function) construct is essentially the same as just
adding a FOO_END enumerator after FOO_SECOND.  Using either
foo_enum_list_end() or FOO_END results in the application that uses the
library getting the value '2' for FOO_END or foo_enum_list_end().

Newer versions of the library being linked to the application won't
change that value '2', regardless of the newer values of FOO_END or
foo_enum_list_end().
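
To make that concrete, here is a sketch of what the application effectively
bakes in at compile time:

	/* built against the v1 header of the library */
	enum foo_enum { FOO_FIRST, FOO_SECOND };
	static inline int foo_enum_list_end(void) { return FOO_SECOND + 1; }

	void app_code(void)
	{
		/* Inlined at compile time: n stays 2 forever, even when the
		 * application later runs against a v2 library whose header
		 * added FOO_THIRD and whose copy of this function returns 3. */
		int n = foo_enum_list_end();
		(void)n;
	}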

So, adding a FOO_THIRD right after FOO_SECOND induces an ABI change.

This change being "breaking" (incompatible) or not, really depends on
what the application expects, I would say.  Sorry if that looks "vague",
but this whole concept is quite blurry.

For instance, if you add FOO_THIRD after FOO_SECOND in the newer version
of the library and the application still gets the value '2' rather than
getting the value '3', and that value is actually multiplied by "two
trillion" in the application to get the value of the dividend to be
paid to investors, then that's a very big error induced by that
change.  That might be considered by the application as a "breaking" ABI
change and you might get a call or two from the CEO of an S&P500 company
that uses the library.

Other applications might consider that "off-by-one" error not
problematic at all and thus might consider it not "breaking".

Note that REMOVING an enumerator however is always considered an
incompatible (breaking) ABI change.

Adding an enumerator however has this annoying "grey topic" (not black or
white) property that I am not sure how to address at this point.

Cheers,

-- 
		Dodji


^ permalink raw reply	[relevance 4%]

* Re: [EXTERNAL] Re: [PATCH] [RFC] cryptodev: replace LIST_END enumerators with APIs
  @ 2024-10-28 11:15  3%         ` Dodji Seketeli
  0 siblings, 0 replies; 169+ results
From: Dodji Seketeli @ 2024-10-28 11:15 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Ferruh Yigit, David Marchand, dev, Dodji Seketeli, thomas,
	hemant.agrawal, Anoob Joseph, pablo.de.lara.guarch, fiona.trahe,
	declan.doherty, matan, g.singh, fanzhang.oss, jianjay.zhou,
	asomalap, ruifeng.wang, konstantin.v.ananyev, radu.nicolau,
	ajit.khaparde, Nagadheeraj Rottela, mdr

Akhil Goyal <gakhil@marvell.com> writes:

>> >>> Now added inline APIs for getting the list end, which need to be updated
>> >>> for each new entry to the enum. This shall help in avoiding an ABI break
>> >>> when adding a new algo.
>> >>>
>> >>
>> >> Hi Akhil,
>> >>
>> >> *I think* this hides the problem instead of fixing it, and this may be
>> >> partially because of the tooling (libabigail) limitation.
>> >>
>> >> This patch prevents the libabigail warning, true, but it doesn't improve
>> >> anything from the application's perspective.
>> >> Before or after this patch, the application knows a fixed value as the END value.
>> >>
>> >> Not all changes in the END (MAX) enum values cause an ABI break, but the
>> >> tool warns on all, which is why I think this may be a tooling limitation [1].
>> >> (Just to stress, I am NOT talking about regular enum values change, I am
>> >> talking about only END (MAX) value changes caused by appending new enum
>> >> items.)
>> >>
>> >> As far as I can see (please Dodji, David correct me if I am wrong) ABI
>> >> break only happens if application and library exchange enum values in
>> >> the API (directly or within a struct).
>> >
>> > - There is also the following issue:
>> > A library publicly exports an array sized against an END (MAX) enum in the API.
>> > https://developers.redhat.com/blog/2019/05/06/how-c-array-sizes-become-part-of-the-binary-interface-of-a-library
>> >
>> 
>> I see. And Dodji explained this requires source-level analysis to detect.
>> 
>> I don't remember seeing a public array whose size is defined by an enum,
>> are you aware of any instance of this usage?
>
> https://patches.dpdk.org/project/dpdk/patch/20241009071151.1106-1-gmuthukrishn@marvell.com/
> This was merged yesterday.

I guess the problematic piece of the code is this:

    diff --git a/lib/cryptodev/rte_cryptodev.h b/lib/cryptodev/rte_cryptodev.h
    index bec947f6d5..aa6ef3a94d 100644
    --- a/lib/cryptodev/rte_cryptodev.h
    +++ b/lib/cryptodev/rte_cryptodev.h
    @@ -185,6 +185,9 @@ struct rte_cryptodev_asymmetric_xform_capability {
              * Value 0 means unavailable, and application should pass the required
              * random value. Otherwise, PMD would internally compute the random number.
              */
    +
    +		uint32_t op_capa[RTE_CRYPTO_ASYM_OP_LIST_END];
    +			/**< Operation specific capabilities. */
     };


Is it possible for the struct rte_cryptodev_asymmetric_xform_capability
to be made an opaque struct whose definition would be present only in a
.c file of the library?

Its data members would then be retrieved by getter functions that take a
pointer to that struct as a parameter.

That way, the uint32_t op_capa[RTE_CRYPTO_ASYM_OP_LIST_END] data member
would be "private" to the .c file and thus would not be part of the
ABI.  Any change to the RTE_CRYPTO_ASYM_OP enum would then become
harmless to that struct.
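
For instance, a minimal sketch of that pattern, reusing the FOO naming from
earlier in this thread (all names illustrative):

	/* foo.h -- public header: the struct stays opaque to applications. */
	#include <stdint.h>

	enum foo_op { FOO_OP_FIRST, FOO_OP_SECOND, FOO_OP_LIST_END };

	struct foo_capability;	/* definition deliberately not exposed */

	uint32_t foo_capability_get_op_capa(const struct foo_capability *c,
					    enum foo_op op);

	/* foo.c -- private definition: its layout is not part of the ABI,
	 * so growing the enum (and thus the array) cannot break applications. */
	struct foo_capability {
		uint32_t op_capa[FOO_OP_LIST_END];
	};

	uint32_t
	foo_capability_get_op_capa(const struct foo_capability *c, enum foo_op op)
	{
		if (c == NULL || (int)op < 0 || op >= FOO_OP_LIST_END)
			return 0;
		return c->op_capa[op];
	}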

I hope this helps.

-- 
		Dodji


^ permalink raw reply	[relevance 3%]

* RE: release candidate 24.11-rc1
  2024-10-18 21:47  4% release candidate 24.11-rc1 Thomas Monjalon
@ 2024-10-29 10:19  0% ` Xu, HailinX
  2024-10-29 19:31  0% ` Thinh Tran
  1 sibling, 0 replies; 169+ results
From: Xu, HailinX @ 2024-10-29 10:19 UTC (permalink / raw)
  To: Thomas Monjalon, dev
  Cc: Kovacevic, Marko, Mcnamara, John, Richardson, Bruce, Ferruh Yigit

> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Saturday, October 19, 2024 5:47 AM
> To: announce@dpdk.org
> Subject: release candidate 24.11-rc1
> 
> A new DPDK release candidate is ready for testing:
> 	https://git.dpdk.org/dpdk/tag/?id=v24.11-rc1
> 
> There are 630 new patches in this snapshot, including many API/ABI
> compatibility breakages.
> This release won't be ABI-compatible with previous ones.
> 
> Release notes:
> 	https://doc.dpdk.org/guides/rel_notes/release_24_11.html
> 
> Highlights of 24.11-rc1:
> 	- bit set and atomic bit manipulation
> 	- IPv6 address API
> 	- Ethernet link lanes
> 	- flow table index action
> 	- Cisco enic VF
> 	- Marvell CN20K
> 	- symmetric crypto SM4
> 	- asymmetric crypto EdDSA
> 	- event device pre-scheduling
> 	- event device independent enqueue
> 
> Please test and report issues on bugs.dpdk.org.
> 
> Few more new APIs may be added in -rc2.
> DPDK 24.11-rc2 is expected in more than two weeks (early November).
> 
> Thank you everyone
> 
Update on the test status for the Intel part. All dpdk 24.11-rc1 testing is done; four new issues were found.

New issues:
1. [dpdk-24.11]ice_fdir/mac_ipv6_udp: match to an irregular message    -> Intel dev is investigating
2. [DPDK-24.11.0-RC1] cryptodev_qat_asym_autotest is failing    -> Intel dev is investigating
3. cpfl_vf_representor_rte_flow/split_queue_mac_ipv4_udp_vf_to_io: rule create failed    -> Intel dev is investigating
4. [DPDK-24.11] E830 200G port can't up when starting testpmd    -> Intel dev is investigating

# Basic Intel(R) NIC testing
* Build or compile:  
 *Build: cover the build test combination with latest GCC/Clang version and the popular OS revision such as Ubuntu24.10, Ubuntu24.04.1, Fedora40, RHEL8.10 RHEL9.4, FreeBSD14.1, SUSE15.6, OpenAnolis8.9, AzureLinux 3.0 etc.
  - All test passed.
 *Compile: cover the CFLAGS (O0/O1/O2/O3) with popular OS such as Ubuntu24.04.1 and RHEL9.4.
  - All test passed with latest dpdk.
* PF/VF(i40e, ixgbe): test scenarios including PF/VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. 
	- All test cases are done. No new issues found.
* PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc.
	- Execution is done. Found 1 issue.
* CPF/APF(MEV): test scenarios including APF-HOST,CPF-HOST,CPF-ACC,cpfl_rte_flow/MTU/Jumboframe/checksum offload, etc.
	- Execution is done. Found 3 issues.
* Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, RFC2544 Zero packet loss performance test, etc.
	- Execution is done. No new issues found.
* Power and IPsec: 
 * Power: test scenarios including bi-direction/Telemetry/Empty Poll Lib/Priority Base Frequency, etc. 
	- Execution is done. No new issues found.
 * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc.
	- Execution is done. No new issues found.
# Basic cryptodev and virtio testing
* Virtio: both function and performance test are covered. Such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMAWARE ESXI 8.0U1, etc.
	- Execution is done. No new issues found.
* Cryptodev: 
 *Function test: test scenarios including Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc.
	- Execution is done. Found 2 issues.
 *Performance test: test scenarios including Throughput Performance /Cryptodev Latency, etc.
	- Execution is done. No performance drop.

Regards,
Xu, Hailin

^ permalink raw reply	[relevance 0%]

* [PATCH v14 0/3] power: introduce PM QoS interface
                     ` (3 preceding siblings ...)
  2024-10-25  9:18  4% ` [PATCH v13 0/3] power: introduce PM QoS interface Huisong Li
@ 2024-10-29 13:28  4% ` Huisong Li
  2024-10-29 13:28  5%   ` [PATCH v14 1/3] power: introduce PM QoS API on CPU wide Huisong Li
  2024-11-04  9:13  0%   ` [PATCH v14 0/3] power: introduce PM QoS interface lihuisong (C)
  2024-11-11  2:25  4% ` [PATCH v15 " Huisong Li
  2024-11-11  9:14  4% ` [RESEND PATCH " Huisong Li
  6 siblings, 2 replies; 169+ results
From: Huisong Li @ 2024-10-29 13:28 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay sensitive and expect a low
resume time, like interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used by userspace to set and get the resume latency limit on
the cpuX. Please see the description in the kernel document[1].
Each cpuidle governor in Linux selects which idle state to enter based on
this CPU resume latency in its idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection, limiting it to the shallowest idle state to lower the delay
when waking up from idle state, by setting a strict resume latency (zero value).

[1] https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us

---
 v14:
  - use parse_uint to parse --cpu-resume-latency instead of adding a new
    parse_int()
 v13:
  - do not allow negative values for --cpu-resume-latency.
  - restore to the original value as Konstantin suggested.
 v12:
  - add Acked-by Chengwen and Konstantin
  - fix overflow issue in l3fwd-power when parsing the command line
  - add a command parameter to set CPU resume latency
 v11:
  - operate on the CPU id that the lcore is mapped to, via the new function
    power_get_lcore_mapped_cpu_id().
 v10:
  - replace LINE_MAX with a custom macro and fix two typos.
 v9:
  - move new feature description from release_24_07.rst to release_24_11.rst.
 v8:
  - update the latest code to resolve CI warning
 v7:
  - remove a dead code rte_lcore_is_enabled in patch[2/2]
 v6:
  - update release_24_07.rst based on dpdk repo to resolve CI warning.
 v5:
  - use LINE_MAX to replace BUFSIZ, and use snprintf to replace sprintf.
 v4:
  - fix some comments basd on Stephen
  - add stdint.h include
  - add Acked-by Morten Brørup <mb@smartsharesystems.com>
 v3:
  - add RTE_POWER_xxx prefix for some macro in header
  - add the check for lcore_id with rte_lcore_is_enabled
 v2:
  - use PM QoS on CPU wide to replace the one on system wide


Huisong Li (3):
  power: introduce PM QoS API on CPU wide
  examples/l3fwd-power: fix data overflow when parse command line
  examples/l3fwd-power: add PM QoS configuration

 doc/guides/prog_guide/power_man.rst           |  19 +++
 doc/guides/rel_notes/release_24_11.rst        |   5 +
 .../sample_app_ug/l3_forward_power_man.rst    |   5 +-
 examples/l3fwd-power/main.c                   |  96 +++++++++++---
 lib/power/meson.build                         |   2 +
 lib/power/rte_power_qos.c                     | 123 ++++++++++++++++++
 lib/power/rte_power_qos.h                     |  73 +++++++++++
 lib/power/version.map                         |   4 +
 8 files changed, 306 insertions(+), 21 deletions(-)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

-- 
2.22.0


^ permalink raw reply	[relevance 4%]

* [PATCH v14 1/3] power: introduce PM QoS API on CPU wide
  2024-10-29 13:28  4% ` [PATCH v14 0/3] power: introduce PM QoS interface Huisong Li
@ 2024-10-29 13:28  5%   ` Huisong Li
  2024-11-04  9:13  0%   ` [PATCH v14 0/3] power: introduce PM QoS interface lihuisong (C)
  1 sibling, 0 replies; 169+ results
From: Huisong Li @ 2024-10-29 13:28 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay sensitive and expect a low
resume time, like interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used by userspace to set and get the resume latency limit on
the cpuX. Each cpuidle governor in Linux selects which idle state to enter
based on this CPU resume latency in its idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection, limiting it to the shallowest idle state to lower the delay
when waking up, by setting a strict resume latency (zero value).

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
 doc/guides/prog_guide/power_man.rst    |  19 ++++
 doc/guides/rel_notes/release_24_11.rst |   5 +
 lib/power/meson.build                  |   2 +
 lib/power/rte_power_qos.c              | 123 +++++++++++++++++++++++++
 lib/power/rte_power_qos.h              |  73 +++++++++++++++
 lib/power/version.map                  |   4 +
 6 files changed, 226 insertions(+)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
index f6674efe2d..91358b04f3 100644
--- a/doc/guides/prog_guide/power_man.rst
+++ b/doc/guides/prog_guide/power_man.rst
@@ -107,6 +107,25 @@ User Cases
 The power management mechanism is used to save power when performing L3 forwarding.
 
 
+PM QoS
+------
+
+The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
+interface is used to set and get the resume latency limit on the cpuX for
+userspace. Each cpuidle governor in Linux select which idle state to enter
+based on this CPU resume latency in their idle task.
+
+The deeper the idle state, the lower the power consumption, but the longer
+the resume time. Some service are latency sensitive and very except the low
+resume time, like interrupt packet receiving mode.
+
+Applications can set and get the CPU resume latency by the
+``rte_power_qos_set_cpu_resume_latency()`` and ``rte_power_qos_get_cpu_resume_latency()``
+respectively. Applications can set a strict resume latency (zero value) by
+the ``rte_power_qos_set_cpu_resume_latency()`` to low the resume latency and
+get better performance (instead, the power consumption of platform may increase).
+
+
 Ethernet PMD Power Management API
 ---------------------------------
 
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index fa4822d928..d9e268274b 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -237,6 +237,11 @@ New Features
   This field is used to pass an extra configuration settings such as ability
   to lookup IPv4 addresses in network byte order.
 
+* **Introduce per-CPU PM QoS interface.**
+
+  * Add per-CPU PM QoS interface to lower the resume latency when waking up
+    from idle state.
+
 * **Added new API to register telemetry endpoint callbacks with private arguments.**
 
   A new ``rte_telemetry_register_cmd_arg`` function is available to pass an opaque value to
diff --git a/lib/power/meson.build b/lib/power/meson.build
index 2f0f3d26e9..9b5d3e8315 100644
--- a/lib/power/meson.build
+++ b/lib/power/meson.build
@@ -23,12 +23,14 @@ sources = files(
         'rte_power.c',
         'rte_power_uncore.c',
         'rte_power_pmd_mgmt.c',
+	'rte_power_qos.c',
 )
 headers = files(
         'rte_power.h',
         'rte_power_guest_channel.h',
         'rte_power_pmd_mgmt.h',
         'rte_power_uncore.h',
+	'rte_power_qos.h',
 )
 
 deps += ['timer', 'ethdev']
diff --git a/lib/power/rte_power_qos.c b/lib/power/rte_power_qos.c
new file mode 100644
index 0000000000..4dd0532b36
--- /dev/null
+++ b/lib/power/rte_power_qos.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_lcore.h>
+#include <rte_log.h>
+
+#include "power_common.h"
+#include "rte_power_qos.h"
+
+#define PM_QOS_SYSFILE_RESUME_LATENCY_US	\
+	"/sys/devices/system/cpu/cpu%u/power/pm_qos_resume_latency_us"
+
+#define PM_QOS_CPU_RESUME_LATENCY_BUF_LEN	32
+
+int
+rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	if (latency < 0) {
+		POWER_LOG(ERR, "latency should be greater than and equal to 0");
+		return -EINVAL;
+	}
+
+	ret = open_core_sysfs_file(&f, "w", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us under the
+	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in the kernel, the
+	 * meaning of the different input strings is as follows.
+	 * 1> the resume latency is 0 if the input is "n/a".
+	 * 2> the resume latency has no constraint if the input is "0".
+	 * 3> the resume latency is the actual value to be set.
+	 */
+	if (latency == RTE_POWER_QOS_STRICT_LATENCY_VALUE)
+		snprintf(buf, sizeof(buf), "%s", "n/a");
+	else if (latency == RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+		snprintf(buf, sizeof(buf), "%u", 0);
+	else
+		snprintf(buf, sizeof(buf), "%u", latency);
+
+	ret = write_core_sysfs_s(f, buf);
+	if (ret != 0)
+		POWER_LOG(ERR, "Failed to write "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+
+	fclose(f);
+
+	return ret;
+}
+
+int
+rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	int latency = -1;
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	ret = open_core_sysfs_file(&f, "r", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	ret = read_core_sysfs_s(f, buf, sizeof(buf));
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to read "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		goto out;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us under the
+	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in the kernel, the
+	 * meaning of the different output strings is as follows.
+	 * 1> the resume latency is 0 if the output is "n/a".
+	 * 2> the resume latency has no constraint if the output is "0".
+	 * 3> the resume latency is the actual value in use for any other string.
+	 */
+	if (strcmp(buf, "n/a") == 0)
+		latency = RTE_POWER_QOS_STRICT_LATENCY_VALUE;
+	else {
+		latency = strtoul(buf, NULL, 10);
+		latency = latency == 0 ? RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT : latency;
+	}
+
+out:
+	fclose(f);
+
+	return latency != -1 ? latency : ret;
+}
diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h
new file mode 100644
index 0000000000..7a8dab9272
--- /dev/null
+++ b/lib/power/rte_power_qos.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#ifndef RTE_POWER_QOS_H
+#define RTE_POWER_QOS_H
+
+#include <stdint.h>
+
+#include <rte_compat.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file rte_power_qos.h
+ *
+ * PM QoS API.
+ *
+ * The CPU-wide resume latency limit directly influences this CPU's idle
+ * state selection in each cpuidle governor.
+ * Please see the per-CPU PM QoS description at the following link:
+ * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
+ *
+ * The deeper the idle state, the lower the power consumption, but the
+ * longer the resume time. Some services are delay sensitive and expect a
+ * low resume time, like interrupt packet receiving mode.
+ *
+ * In such cases, the per-CPU PM QoS API can be used to control this CPU's
+ * idle state selection, limiting it to the shallowest idle state to lower
+ * the wakeup delay, by setting a strict resume latency (zero value).
+ */
+
+#define RTE_POWER_QOS_STRICT_LATENCY_VALUE		0
+#define RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT	INT32_MAX
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param lcore_id
+ *   target logical core id
+ *
+ * @param latency
+ *   The latency value, in microseconds, must be greater than or equal to zero.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the current resume latency of this logical core.
+ * The kernel default is @see RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT
+ * if it has not been set.
+ *
+ * @return
+ *   Negative value on failure.
+ *   >= 0 means the actual resume latency limit on this core.
+ */
+__rte_experimental
+int rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_POWER_QOS_H */
diff --git a/lib/power/version.map b/lib/power/version.map
index c9a226614e..08f178a39d 100644
--- a/lib/power/version.map
+++ b/lib/power/version.map
@@ -51,4 +51,8 @@ EXPERIMENTAL {
 	rte_power_set_uncore_env;
 	rte_power_uncore_freqs;
 	rte_power_unset_uncore_env;
+
+	# added in 24.11
+	rte_power_qos_get_cpu_resume_latency;
+	rte_power_qos_set_cpu_resume_latency;
 };
-- 
2.22.0


^ permalink raw reply	[relevance 5%]

* Re: [PATCH V3 7/7] mlx5: add backward compatibility for RDMA monitor
  @ 2024-10-29 16:26  3%     ` Stephen Hemminger
  2024-10-30  8:25  0%       ` Minggang(Gavin) Li
  0 siblings, 1 reply; 169+ results
From: Stephen Hemminger @ 2024-10-29 16:26 UTC (permalink / raw)
  To: Minggang Li(Gavin)
  Cc: viacheslavo, matan, orika, thomas, Dariusz Sosnowski, Bing Zhao,
	Suanming Mou, dev, rasland

On Tue, 29 Oct 2024 15:42:56 +0200
"Minggang Li(Gavin)" <gavinl@nvidia.com> wrote:

>  
> +* **Updated NVIDIA mlx5 driver.**
> +
> +  Optimized port probing at large scale.
> +  This feature enhances the efficiency of probing VF/SFs on a large scale
> +  by significantly reducing the probing time. To activate this feature,
> +  set ``probe_opt_en`` to a non-zero value during device probing. It
> +  leverages a capability from the RDMA driver, expected to be released in
> +  the upcoming kernel version 6.13 or its equivalent in OFED 24.10,
> +  specifically the RDMA monitor. For additional details on the limitations
> +  of devargs, refer to "doc/guides/nics/mlx5.rst".
> +
> +  If there are lots of VFs/SFs to be probed by the application, e.g., 300
> +  VFs/SFs, the option should be enabled to save probing time.

IMHO the kernel parts have to be available in a released kernel version.
Otherwise the kernel API/ABI is not stable and there is a possibility of user confusion.

This needs to stay in the "awaiting upstream" state until the kernel is released.

^ permalink raw reply	[relevance 3%]

* Re: release candidate 24.11-rc1
  2024-10-18 21:47  4% release candidate 24.11-rc1 Thomas Monjalon
  2024-10-29 10:19  0% ` Xu, HailinX
@ 2024-10-29 19:31  0% ` Thinh Tran
  1 sibling, 0 replies; 169+ results
From: Thinh Tran @ 2024-10-29 19:31 UTC (permalink / raw)
  To: Thomas Monjalon, dpdk-dev

IBM - Power Systems
DPDK v24.11-rc1-6-g90cb8ff819


* Build CI on Fedora 38,39,40 container images for ppc64le
* Basic PF on Mellanox: No issue found
* Performance: not tested.
* OS: RHEL 9.4  kernel: 5.14.0-427.40.1.el9_4.ppc64le
         with gcc version 11.4.1 20231218 (Red Hat 11.4.1-3) (GCC)
       SLES15 SP5  kernel: 5.14.21-150500.55.49-default
         with gcc version 13.2.1 20230912 (SUSE Linux)

Systems tested:
  - LPARs on IBM Power10 CHRP IBM,9105-22A
     NICs:
     - Mellanox Technologies MT2894 Family [ConnectX-6 Lx]
     - firmware version: 26.42.1000
     - MLNX_OFED_LINUX-24.07-0.6.1.5

Thinh Tran

On 10/18/2024 4:47 PM, Thomas Monjalon wrote:
> A new DPDK release candidate is ready for testing:
> 	https://git.dpdk.org/dpdk/tag/?id=v24.11-rc1
> 
> There are 630 new patches in this snapshot,
> including many API/ABI compatibility breakages.
> This release won't be ABI-compatible with previous ones.
> 
> Release notes:
> 	https://doc.dpdk.org/guides/rel_notes/release_24_11.html
> 
> Highlights of 24.11-rc1:
> 	- bit set and atomic bit manipulation
> 	- IPv6 address API
> 	- Ethernet link lanes
> 	- flow table index action
> 	- Cisco enic VF
> 	- Marvell CN20K
> 	- symmetric crypto SM4
> 	- asymmetric crypto EdDSA
> 	- event device pre-scheduling
> 	- event device independent enqueue
> 
> Please test and report issues on bugs.dpdk.org.
> 
> Few more new APIs may be added in -rc2.
> DPDK 24.11-rc2 is expected in more than two weeks (early November).
> 
> Thank you everyone
> 
> 


^ permalink raw reply	[relevance 0%]

* Re: [PATCH RESEND v7 0/5] app/testpmd: support multiple process attach and detach port
  2024-10-18  2:48  0%       ` lihuisong (C)
  2024-10-26  4:11  0%         ` lihuisong (C)
@ 2024-10-29 22:12  0%         ` Ferruh Yigit
  2024-10-30  4:06  0%           ` lihuisong (C)
  1 sibling, 1 reply; 169+ results
From: Ferruh Yigit @ 2024-10-29 22:12 UTC (permalink / raw)
  To: lihuisong (C), thomas, andrew.rybchenko, Stephen Hemminger
  Cc: dev, fengchengwen, liuyonglong

On 10/18/2024 3:48 AM, lihuisong (C) wrote:
> Hi Ferruh,
> 
> Thanks for considering this again. Please see my reply inline.
> 
> On 2024/10/18 9:04, Ferruh Yigit wrote:
>> On 10/8/2024 3:32 AM, lihuisong (C) wrote:
>>> Hi Thomas and Ferruh,
>>>
>>> We've discussed it on and off a few times, and we've reached some
>>> consensus.
>>> This has been going on for more than 2 years😅
>>> Can you have a look at this series again?
>>> If we really don't need it, I will drop it from my upstreaming list.
>>>
>> Hi Huisong,
>>
>> I was not really convinced by the patch series, but did not want to
>> block it outright; sorry that this caused the patch series to stay around.
>>
>> Having checked again, it still feels like it adds unnecessary complexity,
>> and I am for rejecting this series.
>>
>> The overall target is to be able to support hotplug with primary/secondary
>> processes, and the series uses event handlers for this, but that requires
>> adding a new ethdev state to be able to iterate over devices etc...
>> Perhaps there is a better way to support this without relying on event handlers.
> Ignoring the modification of testpmd is ok to me.
> But we need to restrict testpmd from supporting attach and detach port in
> the multiple process case.
> Otherwise, the issues this series solves will be encountered.
> 
> BTW, I want to say the patch [2/5] which introduced
> RTE_ETH_DEV_ALLOCATED should be considered again,
> because it addresses a real issue in the ethdev layer. This is also the
> outcome that Thomas, you and I discussed before.
> Please look at this patch again.
> 

RTE_ETH_DEV_ALLOCATED is added to run RTE_ETH_FOREACH_DEV in the event
handler, more specifically on the 'RTE_ETH_EVENT_NEW' event handler, right?
Without the testpmd event handler update, what is the reason/use case for
the above ethdev change?

Thomas, Andrew, Stephen, please feel free to chime in.


> /Huisong
>>
>>
>>> /Huisong
>>>
>>>
>>> On 2024/9/29 13:52, Huisong Li wrote:
>>>> This patchset fixes some bugs and supports attaching and detaching
>>>> ports in primary and secondary processes.
>>>>
>>>> ---
>>>>    -v7: fix conflicts
>>>>    -v6: adjust rte_eth_dev_is_used position based on alphabetical order
>>>>         in version.map
>>>>    -v5: move 'ALLOCATED' state to the back of 'REMOVED' to avoid abi
>>>> break.
>>>>    -v4: fix a misspelling.
>>>>    -v3:
>>>>      #1 merge patch 1/6 and patch 2/6 into patch 1/5, and add
>>>> modification
>>>>         for other bus type.
>>>>      #2 add a RTE_ETH_DEV_ALLOCATED state in rte_eth_dev_state to
>>>> resolve
>>>>         the problem in patch 2/5.
>>>>    -v2: resend due to CI unexplained failure.
>>>>
>>>> Huisong Li (5):
>>>>     drivers/bus: restore driver assignment at front of probing
>>>>     ethdev: fix skip valid port in probing callback
>>>>     app/testpmd: check the validity of the port
>>>>     app/testpmd: add attach and detach port for multiple process
>>>>     app/testpmd: stop forwarding in new or destroy event
>>>>
>>>>    app/test-pmd/testpmd.c                   | 47 ++++++++++++++
>>>> +---------
>>>>    app/test-pmd/testpmd.h                   |  1 -
>>>>    drivers/bus/auxiliary/auxiliary_common.c |  9 ++++-
>>>>    drivers/bus/dpaa/dpaa_bus.c              |  9 ++++-
>>>>    drivers/bus/fslmc/fslmc_bus.c            |  8 +++-
>>>>    drivers/bus/ifpga/ifpga_bus.c            | 12 ++++--
>>>>    drivers/bus/pci/pci_common.c             |  9 ++++-
>>>>    drivers/bus/vdev/vdev.c                  | 10 ++++-
>>>>    drivers/bus/vmbus/vmbus_common.c         |  9 ++++-
>>>>    drivers/net/bnxt/bnxt_ethdev.c           |  3 +-
>>>>    drivers/net/bonding/bonding_testpmd.c    |  1 -
>>>>    drivers/net/mlx5/mlx5.c                  |  2 +-
>>>>    lib/ethdev/ethdev_driver.c               | 13 +++++--
>>>>    lib/ethdev/ethdev_driver.h               | 12 ++++++
>>>>    lib/ethdev/ethdev_pci.h                  |  2 +-
>>>>    lib/ethdev/rte_class_eth.c               |  2 +-
>>>>    lib/ethdev/rte_ethdev.c                  |  4 +-
>>>>    lib/ethdev/rte_ethdev.h                  |  4 +-
>>>>    lib/ethdev/version.map                   |  1 +
>>>>    19 files changed, 114 insertions(+), 44 deletions(-)
>>>>
>> .


^ permalink raw reply	[relevance 0%]

* Re: [PATCH RESEND v7 0/5] app/testpmd: support multiple process attach and detach port
  2024-10-29 22:12  0%         ` Ferruh Yigit
@ 2024-10-30  4:06  0%           ` lihuisong (C)
  0 siblings, 0 replies; 169+ results
From: lihuisong (C) @ 2024-10-30  4:06 UTC (permalink / raw)
  To: Ferruh Yigit, thomas, andrew.rybchenko, Stephen Hemminger
  Cc: dev, fengchengwen, liuyonglong


On 2024/10/30 6:12, Ferruh Yigit wrote:
> On 10/18/2024 3:48 AM, lihuisong (C) wrote:
>> Hi Ferruh,
>>
>> Thanks for your considering again. please see reply inline.
>>
>> On 2024/10/18 9:04, Ferruh Yigit wrote:
>>> On 10/8/2024 3:32 AM, lihuisong (C) wrote:
>>>> Hi Thomas and Ferruh,
>>>>
>>>> We've discussed it on and off a few times, and we've reached some
>>>> consensus.
>>>> They've been going on for more than 2 years😅
>>>> Can you have a look at this series again?
>>>> If we really don't need it, I will drop it from my upstreaming list.
>>>>
>>> Hi Huisong,
>>>
>>> I was not really convinced by the patch series, but did not want to
>>> block it outright; sorry that this caused the patch series to stay around.
>>>
>>> Having checked again, it still feels like adding unnecessary complexity,
>>> and I am for rejecting this series.
>>>
>>> The overall target is to be able to support hotplug with primary/secondary
>>> processes, and it uses event handlers for this, but this requires adding a
>>> new ethdev state to be able to iterate over devices etc...
>>> Perhaps there is a better way to support this without relying on event
>>> handlers.
>> Ignoring the modification of testpmd is ok to me.
>> But we need to restrict testpmd from supporting attach and detach port in
>> the multiple process case.
>> Otherwise, the issues this series solved will be encountered.
>>
>> BTW, I want to say that patch [2/5], which introduced
>> RTE_ETH_DEV_ALLOCATED, should be considered again,
>> because it is a real issue in the ethdev layer. This is also the fruit of
>> what Thomas, you and I discussed before.
>> Please look at this patch again.
>>
> RTE_ETH_DEV_ALLOCATED is added to run RTE_ETH_FOREACH_DEV in the event
> handler, more specifically on the 'RTE_ETH_EVENT_NEW' event handler, right?
Yes
> Without the testpmd event handler update, what is the reason/use case for
> the above ethdev change?
Without the testpmd event handler modification, other applications may also
encounter it.
Please take a look at the commit message of patch 2/5 and the modification in
patch 3/5.

>
> Thomas, Andrew, Stephen, please feel free to chime in.
>
>
>> /Huisong
>>>
>>>> /Huisong
>>>>
>>>>
>>>> On 2024/9/29 13:52, Huisong Li wrote:
>>>>> This patchset fixes some bugs and supports attaching and detaching
>>>>> ports in primary and secondary processes.
>>>>>
>>>>> ---
>>>>>     -v7: fix conflicts
>>>>>     -v6: adjust rte_eth_dev_is_used position based on alphabetical order
>>>>>          in version.map
>>>>>     -v5: move 'ALLOCATED' state to the back of 'REMOVED' to avoid abi
>>>>> break.
>>>>>     -v4: fix a misspelling.
>>>>>     -v3:
>>>>>       #1 merge patch 1/6 and patch 2/6 into patch 1/5, and add
>>>>> modification
>>>>>          for other bus type.
>>>>>       #2 add a RTE_ETH_DEV_ALLOCATED state in rte_eth_dev_state to
>>>>> resolve
>>>>>          the problem in patch 2/5.
>>>>>     -v2: resend due to CI unexplained failure.
>>>>>
>>>>> Huisong Li (5):
>>>>>      drivers/bus: restore driver assignment at front of probing
>>>>>      ethdev: fix skip valid port in probing callback
>>>>>      app/testpmd: check the validity of the port
>>>>>      app/testpmd: add attach and detach port for multiple process
>>>>>      app/testpmd: stop forwarding in new or destroy event
>>>>>
>>>>>     app/test-pmd/testpmd.c                   | 47 ++++++++++++++
>>>>> +---------
>>>>>     app/test-pmd/testpmd.h                   |  1 -
>>>>>     drivers/bus/auxiliary/auxiliary_common.c |  9 ++++-
>>>>>     drivers/bus/dpaa/dpaa_bus.c              |  9 ++++-
>>>>>     drivers/bus/fslmc/fslmc_bus.c            |  8 +++-
>>>>>     drivers/bus/ifpga/ifpga_bus.c            | 12 ++++--
>>>>>     drivers/bus/pci/pci_common.c             |  9 ++++-
>>>>>     drivers/bus/vdev/vdev.c                  | 10 ++++-
>>>>>     drivers/bus/vmbus/vmbus_common.c         |  9 ++++-
>>>>>     drivers/net/bnxt/bnxt_ethdev.c           |  3 +-
>>>>>     drivers/net/bonding/bonding_testpmd.c    |  1 -
>>>>>     drivers/net/mlx5/mlx5.c                  |  2 +-
>>>>>     lib/ethdev/ethdev_driver.c               | 13 +++++--
>>>>>     lib/ethdev/ethdev_driver.h               | 12 ++++++
>>>>>     lib/ethdev/ethdev_pci.h                  |  2 +-
>>>>>     lib/ethdev/rte_class_eth.c               |  2 +-
>>>>>     lib/ethdev/rte_ethdev.c                  |  4 +-
>>>>>     lib/ethdev/rte_ethdev.h                  |  4 +-
>>>>>     lib/ethdev/version.map                   |  1 +
>>>>>     19 files changed, 114 insertions(+), 44 deletions(-)
>>>>>
>>> .
> .

^ permalink raw reply	[relevance 0%]

* [PATCH 3/3] net/nfp: add support for port identify
  @ 2024-10-30  8:19  6% ` Chaoyong He
    1 sibling, 0 replies; 169+ results
From: Chaoyong He @ 2024-10-30  8:19 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He, James Hershaw

Implement the necessary functions to allow user to visually identify a
physical port associated with a netdev by blinking an LED on that port.

Signed-off-by: James Hershaw <james.hershaw@corigine.com>
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
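A usage note: these new dev_led_on/dev_led_off callbacks are reached through
the generic ethdev LED API. A minimal sketch of locating a physical port from
an application (the port_id and blink timing are illustrative):

	/* Requires <rte_ethdev.h> and <rte_cycles.h>; port_id is assumed to
	 * be a started NFP physical port. Blink the LED a few times so the
	 * port can be found in the field. */
	for (int i = 0; i < 5; i++) {
		rte_eth_led_on(port_id);
		rte_delay_ms(500);
		rte_eth_led_off(port_id);
		rte_delay_ms(500);
	}

Note that on a non-physical representor port this PMD returns -EOPNOTSUPP,
so callers should check the return values.
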
 .../net/nfp/flower/nfp_flower_representor.c   | 30 ++++++++++++++++
 drivers/net/nfp/nfp_ethdev.c                  |  2 ++
 drivers/net/nfp/nfp_net_common.c              | 32 +++++++++++++++++
 drivers/net/nfp/nfp_net_common.h              |  2 ++
 drivers/net/nfp/nfpcore/nfp_nsp.h             |  1 +
 drivers/net/nfp/nfpcore/nfp_nsp_eth.c         | 36 +++++++++++++++++++
 6 files changed, 103 insertions(+)

diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index 3d043e052a..01ca8a6768 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -88,6 +88,30 @@ nfp_repr_get_module_eeprom(struct rte_eth_dev *dev,
 	return nfp_net_get_module_eeprom(dev, info);
 }
 
+static int
+nfp_flower_repr_led_on(struct rte_eth_dev *dev)
+{
+	struct nfp_flower_representor *repr;
+
+	repr = dev->data->dev_private;
+	if (!nfp_flower_repr_is_phy(repr))
+		return -EOPNOTSUPP;
+
+	return nfp_net_led_on(dev);
+}
+
+static int
+nfp_flower_repr_led_off(struct rte_eth_dev *dev)
+{
+	struct nfp_flower_representor *repr;
+
+	repr = dev->data->dev_private;
+	if (!nfp_flower_repr_is_phy(repr))
+		return -EOPNOTSUPP;
+
+	return nfp_net_led_off(dev);
+}
+
 static int
 nfp_flower_repr_link_update(struct rte_eth_dev *dev,
 		__rte_unused int wait_to_complete)
@@ -623,6 +647,9 @@ static const struct eth_dev_ops nfp_flower_multiple_pf_repr_dev_ops = {
 	.set_eeprom           = nfp_repr_set_eeprom,
 	.get_module_info      = nfp_repr_get_module_info,
 	.get_module_eeprom    = nfp_repr_get_module_eeprom,
+
+	.dev_led_on           = nfp_flower_repr_led_on,
+	.dev_led_off          = nfp_flower_repr_led_off,
 };
 
 static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
@@ -661,6 +688,9 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
 	.set_eeprom           = nfp_repr_set_eeprom,
 	.get_module_info      = nfp_repr_get_module_info,
 	.get_module_eeprom    = nfp_repr_get_module_eeprom,
+
+	.dev_led_on           = nfp_flower_repr_led_on,
+	.dev_led_off          = nfp_flower_repr_led_off,
 };
 
 static uint32_t
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 2ee76d309c..f54483822f 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -983,6 +983,8 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
 	.set_eeprom             = nfp_net_set_eeprom,
 	.get_module_info        = nfp_net_get_module_info,
 	.get_module_eeprom      = nfp_net_get_module_eeprom,
+	.dev_led_on             = nfp_net_led_on,
+	.dev_led_off            = nfp_net_led_off,
 };
 
 static inline void
diff --git a/drivers/net/nfp/nfp_net_common.c b/drivers/net/nfp/nfp_net_common.c
index a45837353a..e68ce68229 100644
--- a/drivers/net/nfp/nfp_net_common.c
+++ b/drivers/net/nfp/nfp_net_common.c
@@ -3181,3 +3181,35 @@ nfp_net_get_module_eeprom(struct rte_eth_dev *dev,
 	nfp_nsp_close(nsp);
 	return ret;
 }
+
+static int
+nfp_net_led_control(struct rte_eth_dev *dev,
+		bool is_on)
+{
+	int ret;
+	uint32_t nfp_idx;
+	struct nfp_net_hw_priv *hw_priv;
+
+	hw_priv = dev->process_private;
+	nfp_idx = nfp_net_get_nfp_index(dev);
+
+	ret = nfp_eth_set_idmode(hw_priv->pf_dev->cpp, nfp_idx, is_on);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Set nfp idmode failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+nfp_net_led_on(struct rte_eth_dev *dev)
+{
+	return nfp_net_led_control(dev, true);
+}
+
+int
+nfp_net_led_off(struct rte_eth_dev *dev)
+{
+	return nfp_net_led_control(dev, false);
+}
diff --git a/drivers/net/nfp/nfp_net_common.h b/drivers/net/nfp/nfp_net_common.h
index 5ad698cad2..d85a00a75e 100644
--- a/drivers/net/nfp/nfp_net_common.h
+++ b/drivers/net/nfp/nfp_net_common.h
@@ -399,6 +399,8 @@ int nfp_net_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eepr
 int nfp_net_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom);
 int nfp_net_get_module_info(struct rte_eth_dev *dev, struct rte_eth_dev_module_info *info);
 int nfp_net_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info);
+int nfp_net_led_on(struct rte_eth_dev *dev);
+int nfp_net_led_off(struct rte_eth_dev *dev);
 
 #define NFP_PRIV_TO_APP_FW_NIC(app_fw_priv)\
 	((struct nfp_app_fw_nic *)app_fw_priv)
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.h b/drivers/net/nfp/nfpcore/nfp_nsp.h
index 0ae10dabfb..6230a84e34 100644
--- a/drivers/net/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.h
@@ -216,6 +216,7 @@ int nfp_eth_set_speed(struct nfp_nsp *nsp, uint32_t speed);
 int nfp_eth_set_split(struct nfp_nsp *nsp, uint32_t lanes);
 int nfp_eth_set_tx_pause(struct nfp_nsp *nsp, bool tx_pause);
 int nfp_eth_set_rx_pause(struct nfp_nsp *nsp, bool rx_pause);
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, uint32_t idx, bool is_on);
 
 /* NSP static information */
 struct nfp_nsp_identify {
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
index 1fcd54656a..404690d05f 100644
--- a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
@@ -44,6 +44,7 @@
 #define NSP_ETH_CTRL_SET_LANES          RTE_BIT64(5)
 #define NSP_ETH_CTRL_SET_ANEG           RTE_BIT64(6)
 #define NSP_ETH_CTRL_SET_FEC            RTE_BIT64(7)
+#define NSP_ETH_CTRL_SET_IDMODE         RTE_BIT64(8)
 #define NSP_ETH_CTRL_SET_TX_PAUSE       RTE_BIT64(10)
 #define NSP_ETH_CTRL_SET_RX_PAUSE       RTE_BIT64(11)
 
@@ -736,3 +737,38 @@ nfp_eth_set_rx_pause(struct nfp_nsp *nsp,
 	return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
 			NSP_ETH_STATE_RX_PAUSE, rx_pause, NSP_ETH_CTRL_SET_RX_PAUSE);
 }
+
+int
+nfp_eth_set_idmode(struct nfp_cpp *cpp,
+		uint32_t idx,
+		bool is_on)
+{
+	uint64_t reg;
+	struct nfp_nsp *nsp;
+	union eth_table_entry *entries;
+
+	nsp = nfp_eth_config_start(cpp, idx);
+	if (nsp == NULL)
+		return -EIO;
+
+	/*
+	 * Older ABI versions did support this feature, however this has only
+	 * been reliable since ABI 32.
+	 */
+	if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
+		PMD_DRV_LOG(ERR, "Operation only supported on ABI 32 or newer.");
+		nfp_eth_config_cleanup_end(nsp);
+		return -ENOTSUP;
+	}
+
+	entries = nfp_nsp_config_entries(nsp);
+
+	reg = rte_le_to_cpu_64(entries[idx].control);
+	reg &= ~NSP_ETH_CTRL_SET_IDMODE;
+	reg |= FIELD_PREP(NSP_ETH_CTRL_SET_IDMODE, is_on);
+	entries[idx].control = rte_cpu_to_le_64(reg);
+
+	nfp_nsp_config_set_modified(nsp, 1);
+
+	return nfp_eth_config_commit_end(nsp);
+}
-- 
2.43.5


^ permalink raw reply	[relevance 6%]

* Re: [PATCH V3 7/7] mlx5: add backward compatibility for RDMA monitor
  2024-10-29 16:26  3%     ` Stephen Hemminger
@ 2024-10-30  8:25  0%       ` Minggang(Gavin) Li
  0 siblings, 0 replies; 169+ results
From: Minggang(Gavin) Li @ 2024-10-30  8:25 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: viacheslavo, matan, orika, thomas, Dariusz Sosnowski, Bing Zhao,
	Suanming Mou, dev, rasland


On 10/30/2024 12:26 AM, Stephen Hemminger wrote:
> On Tue, 29 Oct 2024 15:42:56 +0200
> "Minggang Li(Gavin)" <gavinl@nvidia.com> wrote:
>
>>   
>> +* **Updated NVIDIA mlx5 driver.**
>> +
>> +  Optimized port probe in large scale.
>> +  This feature enhances the efficiency of probing VF/SFs on a large scale
>> +  by significantly reducing the probing time. To activate this feature,
>> +  set ``probe_opt_en`` to a non-zero value during device probing. It
>> +  leverages a capability from the RDMA driver, expected to be released in
>> +  the upcoming kernel version 6.13 or its equivalent in OFED 24.10,
>> +  specifically the RDMA monitor. For additional details on the limitations
>> +  of devargs, refer to "doc/guides/nics/mlx5.rst".
>> +
>> +  If there are lots of VFs/SFs to be probed by the application, eg, 300
>> +  VFs/SFs, the option should be enabled to save probing time.
> IMHO the kernel parts have to be available in a released kernel version.
> Otherwise the kernel API/ABI is not stable and there is a possibility of user confusion.
>
> This needs to stay in "awaiting upstream" state until kernel is released
Sorry, it's a typo. The dependency is on kernel 6.12, which is in RC. Do you
think we should wait for it to be released before pushing the patch to DPDK
upstream?
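
To be concrete, the option is passed as a device argument at probe time; a
minimal example (the PCI address is illustrative):

	dpdk-testpmd -a 0000:08:00.2,probe_opt_en=1 -- -i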

^ permalink raw reply	[relevance 0%]

* [PATCH 4/4] net/nfp: add support for port identify
  @ 2024-10-30  8:27  6%   ` Chaoyong He
    1 sibling, 0 replies; 169+ results
From: Chaoyong He @ 2024-10-30  8:27 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He, James Hershaw

Implement the necessary functions to allow user to visually identify a
physical port associated with a netdev by blinking an LED on that port.

Signed-off-by: James Hershaw <james.hershaw@corigine.com>
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
 .../net/nfp/flower/nfp_flower_representor.c   | 30 ++++++++++++++++
 drivers/net/nfp/nfp_ethdev.c                  |  2 ++
 drivers/net/nfp/nfp_net_common.c              | 32 +++++++++++++++++
 drivers/net/nfp/nfp_net_common.h              |  2 ++
 drivers/net/nfp/nfpcore/nfp_nsp.h             |  1 +
 drivers/net/nfp/nfpcore/nfp_nsp_eth.c         | 36 +++++++++++++++++++
 6 files changed, 103 insertions(+)

diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index 3d043e052a..01ca8a6768 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -88,6 +88,30 @@ nfp_repr_get_module_eeprom(struct rte_eth_dev *dev,
 	return nfp_net_get_module_eeprom(dev, info);
 }
 
+static int
+nfp_flower_repr_led_on(struct rte_eth_dev *dev)
+{
+	struct nfp_flower_representor *repr;
+
+	repr = dev->data->dev_private;
+	if (!nfp_flower_repr_is_phy(repr))
+		return -EOPNOTSUPP;
+
+	return nfp_net_led_on(dev);
+}
+
+static int
+nfp_flower_repr_led_off(struct rte_eth_dev *dev)
+{
+	struct nfp_flower_representor *repr;
+
+	repr = dev->data->dev_private;
+	if (!nfp_flower_repr_is_phy(repr))
+		return -EOPNOTSUPP;
+
+	return nfp_net_led_off(dev);
+}
+
 static int
 nfp_flower_repr_link_update(struct rte_eth_dev *dev,
 		__rte_unused int wait_to_complete)
@@ -623,6 +647,9 @@ static const struct eth_dev_ops nfp_flower_multiple_pf_repr_dev_ops = {
 	.set_eeprom           = nfp_repr_set_eeprom,
 	.get_module_info      = nfp_repr_get_module_info,
 	.get_module_eeprom    = nfp_repr_get_module_eeprom,
+
+	.dev_led_on           = nfp_flower_repr_led_on,
+	.dev_led_off          = nfp_flower_repr_led_off,
 };
 
 static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
@@ -661,6 +688,9 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
 	.set_eeprom           = nfp_repr_set_eeprom,
 	.get_module_info      = nfp_repr_get_module_info,
 	.get_module_eeprom    = nfp_repr_get_module_eeprom,
+
+	.dev_led_on           = nfp_flower_repr_led_on,
+	.dev_led_off          = nfp_flower_repr_led_off,
 };
 
 static uint32_t
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 2ee76d309c..f54483822f 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -983,6 +983,8 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
 	.set_eeprom             = nfp_net_set_eeprom,
 	.get_module_info        = nfp_net_get_module_info,
 	.get_module_eeprom      = nfp_net_get_module_eeprom,
+	.dev_led_on             = nfp_net_led_on,
+	.dev_led_off            = nfp_net_led_off,
 };
 
 static inline void
diff --git a/drivers/net/nfp/nfp_net_common.c b/drivers/net/nfp/nfp_net_common.c
index a45837353a..e68ce68229 100644
--- a/drivers/net/nfp/nfp_net_common.c
+++ b/drivers/net/nfp/nfp_net_common.c
@@ -3181,3 +3181,35 @@ nfp_net_get_module_eeprom(struct rte_eth_dev *dev,
 	nfp_nsp_close(nsp);
 	return ret;
 }
+
+static int
+nfp_net_led_control(struct rte_eth_dev *dev,
+		bool is_on)
+{
+	int ret;
+	uint32_t nfp_idx;
+	struct nfp_net_hw_priv *hw_priv;
+
+	hw_priv = dev->process_private;
+	nfp_idx = nfp_net_get_nfp_index(dev);
+
+	ret = nfp_eth_set_idmode(hw_priv->pf_dev->cpp, nfp_idx, is_on);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Set nfp idmode failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+nfp_net_led_on(struct rte_eth_dev *dev)
+{
+	return nfp_net_led_control(dev, true);
+}
+
+int
+nfp_net_led_off(struct rte_eth_dev *dev)
+{
+	return nfp_net_led_control(dev, false);
+}
diff --git a/drivers/net/nfp/nfp_net_common.h b/drivers/net/nfp/nfp_net_common.h
index 5ad698cad2..d85a00a75e 100644
--- a/drivers/net/nfp/nfp_net_common.h
+++ b/drivers/net/nfp/nfp_net_common.h
@@ -399,6 +399,8 @@ int nfp_net_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eepr
 int nfp_net_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom);
 int nfp_net_get_module_info(struct rte_eth_dev *dev, struct rte_eth_dev_module_info *info);
 int nfp_net_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info);
+int nfp_net_led_on(struct rte_eth_dev *dev);
+int nfp_net_led_off(struct rte_eth_dev *dev);
 
 #define NFP_PRIV_TO_APP_FW_NIC(app_fw_priv)\
 	((struct nfp_app_fw_nic *)app_fw_priv)
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.h b/drivers/net/nfp/nfpcore/nfp_nsp.h
index 0ae10dabfb..6230a84e34 100644
--- a/drivers/net/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.h
@@ -216,6 +216,7 @@ int nfp_eth_set_speed(struct nfp_nsp *nsp, uint32_t speed);
 int nfp_eth_set_split(struct nfp_nsp *nsp, uint32_t lanes);
 int nfp_eth_set_tx_pause(struct nfp_nsp *nsp, bool tx_pause);
 int nfp_eth_set_rx_pause(struct nfp_nsp *nsp, bool rx_pause);
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, uint32_t idx, bool is_on);
 
 /* NSP static information */
 struct nfp_nsp_identify {
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
index 1fcd54656a..404690d05f 100644
--- a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
@@ -44,6 +44,7 @@
 #define NSP_ETH_CTRL_SET_LANES          RTE_BIT64(5)
 #define NSP_ETH_CTRL_SET_ANEG           RTE_BIT64(6)
 #define NSP_ETH_CTRL_SET_FEC            RTE_BIT64(7)
+#define NSP_ETH_CTRL_SET_IDMODE         RTE_BIT64(8)
 #define NSP_ETH_CTRL_SET_TX_PAUSE       RTE_BIT64(10)
 #define NSP_ETH_CTRL_SET_RX_PAUSE       RTE_BIT64(11)
 
@@ -736,3 +737,38 @@ nfp_eth_set_rx_pause(struct nfp_nsp *nsp,
 	return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
 			NSP_ETH_STATE_RX_PAUSE, rx_pause, NSP_ETH_CTRL_SET_RX_PAUSE);
 }
+
+int
+nfp_eth_set_idmode(struct nfp_cpp *cpp,
+		uint32_t idx,
+		bool is_on)
+{
+	uint64_t reg;
+	struct nfp_nsp *nsp;
+	union eth_table_entry *entries;
+
+	nsp = nfp_eth_config_start(cpp, idx);
+	if (nsp == NULL)
+		return -EIO;
+
+	/*
+	 * Older ABI versions did support this feature, however this has only
+	 * been reliable since ABI 32.
+	 */
+	if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
+		PMD_DRV_LOG(ERR, "Operation only supported on ABI 32 or newer.");
+		nfp_eth_config_cleanup_end(nsp);
+		return -ENOTSUP;
+	}
+
+	entries = nfp_nsp_config_entries(nsp);
+
+	reg = rte_le_to_cpu_64(entries[idx].control);
+	reg &= ~NSP_ETH_CTRL_SET_IDMODE;
+	reg |= FIELD_PREP(NSP_ETH_CTRL_SET_IDMODE, is_on);
+	entries[idx].control = rte_cpu_to_le_64(reg);
+
+	nfp_nsp_config_set_modified(nsp, 1);
+
+	return nfp_eth_config_commit_end(nsp);
+}
-- 
2.43.5


^ permalink raw reply	[relevance 6%]

* [PATCH v3 4/4] net/nfp: add support for port identify
  @ 2024-11-01  2:57  6%     ` Chaoyong He
    1 sibling, 0 replies; 169+ results
From: Chaoyong He @ 2024-11-01  2:57 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He, James Hershaw

Implement the necessary functions to allow user to visually identify a
physical port associated with a netdev by blinking an LED on that port.

Signed-off-by: James Hershaw <james.hershaw@corigine.com>
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
 .../net/nfp/flower/nfp_flower_representor.c   | 30 ++++++++++++++++
 drivers/net/nfp/nfp_ethdev.c                  |  2 ++
 drivers/net/nfp/nfp_net_common.c              | 32 +++++++++++++++++
 drivers/net/nfp/nfp_net_common.h              |  2 ++
 drivers/net/nfp/nfpcore/nfp_nsp.h             |  1 +
 drivers/net/nfp/nfpcore/nfp_nsp_eth.c         | 36 +++++++++++++++++++
 6 files changed, 103 insertions(+)

diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index 04536ce15f..4017f602a2 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -88,6 +88,30 @@ nfp_repr_get_module_eeprom(struct rte_eth_dev *dev,
 	return nfp_net_get_module_eeprom(dev, info);
 }
 
+static int
+nfp_flower_repr_led_on(struct rte_eth_dev *dev)
+{
+	struct nfp_flower_representor *repr;
+
+	repr = dev->data->dev_private;
+	if (!nfp_flower_repr_is_phy(repr))
+		return -EOPNOTSUPP;
+
+	return nfp_net_led_on(dev);
+}
+
+static int
+nfp_flower_repr_led_off(struct rte_eth_dev *dev)
+{
+	struct nfp_flower_representor *repr;
+
+	repr = dev->data->dev_private;
+	if (!nfp_flower_repr_is_phy(repr))
+		return -EOPNOTSUPP;
+
+	return nfp_net_led_off(dev);
+}
+
 static int
 nfp_flower_repr_link_update(struct rte_eth_dev *dev,
 		__rte_unused int wait_to_complete)
@@ -623,6 +647,9 @@ static const struct eth_dev_ops nfp_flower_multiple_pf_repr_dev_ops = {
 	.set_eeprom           = nfp_repr_set_eeprom,
 	.get_module_info      = nfp_repr_get_module_info,
 	.get_module_eeprom    = nfp_repr_get_module_eeprom,
+
+	.dev_led_on           = nfp_flower_repr_led_on,
+	.dev_led_off          = nfp_flower_repr_led_off,
 };
 
 static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
@@ -661,6 +688,9 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
 	.set_eeprom           = nfp_repr_set_eeprom,
 	.get_module_info      = nfp_repr_get_module_info,
 	.get_module_eeprom    = nfp_repr_get_module_eeprom,
+
+	.dev_led_on           = nfp_flower_repr_led_on,
+	.dev_led_off          = nfp_flower_repr_led_off,
 };
 
 static uint32_t
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 2ee76d309c..f54483822f 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -983,6 +983,8 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
 	.set_eeprom             = nfp_net_set_eeprom,
 	.get_module_info        = nfp_net_get_module_info,
 	.get_module_eeprom      = nfp_net_get_module_eeprom,
+	.dev_led_on             = nfp_net_led_on,
+	.dev_led_off            = nfp_net_led_off,
 };
 
 static inline void
diff --git a/drivers/net/nfp/nfp_net_common.c b/drivers/net/nfp/nfp_net_common.c
index a45837353a..e68ce68229 100644
--- a/drivers/net/nfp/nfp_net_common.c
+++ b/drivers/net/nfp/nfp_net_common.c
@@ -3181,3 +3181,35 @@ nfp_net_get_module_eeprom(struct rte_eth_dev *dev,
 	nfp_nsp_close(nsp);
 	return ret;
 }
+
+static int
+nfp_net_led_control(struct rte_eth_dev *dev,
+		bool is_on)
+{
+	int ret;
+	uint32_t nfp_idx;
+	struct nfp_net_hw_priv *hw_priv;
+
+	hw_priv = dev->process_private;
+	nfp_idx = nfp_net_get_nfp_index(dev);
+
+	ret = nfp_eth_set_idmode(hw_priv->pf_dev->cpp, nfp_idx, is_on);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Set nfp idmode failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+nfp_net_led_on(struct rte_eth_dev *dev)
+{
+	return nfp_net_led_control(dev, true);
+}
+
+int
+nfp_net_led_off(struct rte_eth_dev *dev)
+{
+	return nfp_net_led_control(dev, false);
+}
diff --git a/drivers/net/nfp/nfp_net_common.h b/drivers/net/nfp/nfp_net_common.h
index 5ad698cad2..d85a00a75e 100644
--- a/drivers/net/nfp/nfp_net_common.h
+++ b/drivers/net/nfp/nfp_net_common.h
@@ -399,6 +399,8 @@ int nfp_net_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eepr
 int nfp_net_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom);
 int nfp_net_get_module_info(struct rte_eth_dev *dev, struct rte_eth_dev_module_info *info);
 int nfp_net_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info);
+int nfp_net_led_on(struct rte_eth_dev *dev);
+int nfp_net_led_off(struct rte_eth_dev *dev);
 
 #define NFP_PRIV_TO_APP_FW_NIC(app_fw_priv)\
 	((struct nfp_app_fw_nic *)app_fw_priv)
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.h b/drivers/net/nfp/nfpcore/nfp_nsp.h
index 0ae10dabfb..6230a84e34 100644
--- a/drivers/net/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.h
@@ -216,6 +216,7 @@ int nfp_eth_set_speed(struct nfp_nsp *nsp, uint32_t speed);
 int nfp_eth_set_split(struct nfp_nsp *nsp, uint32_t lanes);
 int nfp_eth_set_tx_pause(struct nfp_nsp *nsp, bool tx_pause);
 int nfp_eth_set_rx_pause(struct nfp_nsp *nsp, bool rx_pause);
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, uint32_t idx, bool is_on);
 
 /* NSP static information */
 struct nfp_nsp_identify {
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
index 1fcd54656a..404690d05f 100644
--- a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
@@ -44,6 +44,7 @@
 #define NSP_ETH_CTRL_SET_LANES          RTE_BIT64(5)
 #define NSP_ETH_CTRL_SET_ANEG           RTE_BIT64(6)
 #define NSP_ETH_CTRL_SET_FEC            RTE_BIT64(7)
+#define NSP_ETH_CTRL_SET_IDMODE         RTE_BIT64(8)
 #define NSP_ETH_CTRL_SET_TX_PAUSE       RTE_BIT64(10)
 #define NSP_ETH_CTRL_SET_RX_PAUSE       RTE_BIT64(11)
 
@@ -736,3 +737,38 @@ nfp_eth_set_rx_pause(struct nfp_nsp *nsp,
 	return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
 			NSP_ETH_STATE_RX_PAUSE, rx_pause, NSP_ETH_CTRL_SET_RX_PAUSE);
 }
+
+int
+nfp_eth_set_idmode(struct nfp_cpp *cpp,
+		uint32_t idx,
+		bool is_on)
+{
+	uint64_t reg;
+	struct nfp_nsp *nsp;
+	union eth_table_entry *entries;
+
+	nsp = nfp_eth_config_start(cpp, idx);
+	if (nsp == NULL)
+		return -EIO;
+
+	/*
+	 * Older ABI versions did support this feature, however this has only
+	 * been reliable since ABI 32.
+	 */
+	if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
+		PMD_DRV_LOG(ERR, "Operation only supported on ABI 32 or newer.");
+		nfp_eth_config_cleanup_end(nsp);
+		return -ENOTSUP;
+	}
+
+	entries = nfp_nsp_config_entries(nsp);
+
+	reg = rte_le_to_cpu_64(entries[idx].control);
+	reg &= ~NSP_ETH_CTRL_SET_IDMODE;
+	reg |= FIELD_PREP(NSP_ETH_CTRL_SET_IDMODE, is_on);
+	entries[idx].control = rte_cpu_to_le_64(reg);
+
+	nfp_nsp_config_set_modified(nsp, 1);
+
+	return nfp_eth_config_commit_end(nsp);
+}
-- 
2.43.5


^ permalink raw reply	[relevance 6%]

* [PATCH v4 4/4] net/nfp: add LED support
  @ 2024-11-04  1:34  6%       ` Chaoyong He
  0 siblings, 0 replies; 169+ results
From: Chaoyong He @ 2024-11-04  1:34 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He, James Hershaw

Implement the necessary functions to allow user to visually identify a
physical port associated with a netdev by blinking an LED on that port.

Signed-off-by: James Hershaw <james.hershaw@corigine.com>
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
 doc/guides/nics/features/nfp.ini              |  1 +
 .../net/nfp/flower/nfp_flower_representor.c   | 30 ++++++++++++++++
 drivers/net/nfp/nfp_ethdev.c                  |  2 ++
 drivers/net/nfp/nfp_net_common.c              | 32 +++++++++++++++++
 drivers/net/nfp/nfp_net_common.h              |  2 ++
 drivers/net/nfp/nfpcore/nfp_nsp.h             |  1 +
 drivers/net/nfp/nfpcore/nfp_nsp_eth.c         | 36 +++++++++++++++++++
 7 files changed, 104 insertions(+)

diff --git a/doc/guides/nics/features/nfp.ini b/doc/guides/nics/features/nfp.ini
index 5303b3abf5..124663ae00 100644
--- a/doc/guides/nics/features/nfp.ini
+++ b/doc/guides/nics/features/nfp.ini
@@ -27,6 +27,7 @@ Basic stats          = Y
 Stats per queue      = Y
 EEPROM dump          = Y
 Module EEPROM dump   = Y
+LED                  = Y
 Linux                = Y
 Multiprocess aware   = Y
 x86-64               = Y
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index 04536ce15f..4017f602a2 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -88,6 +88,30 @@ nfp_repr_get_module_eeprom(struct rte_eth_dev *dev,
 	return nfp_net_get_module_eeprom(dev, info);
 }
 
+static int
+nfp_flower_repr_led_on(struct rte_eth_dev *dev)
+{
+	struct nfp_flower_representor *repr;
+
+	repr = dev->data->dev_private;
+	if (!nfp_flower_repr_is_phy(repr))
+		return -EOPNOTSUPP;
+
+	return nfp_net_led_on(dev);
+}
+
+static int
+nfp_flower_repr_led_off(struct rte_eth_dev *dev)
+{
+	struct nfp_flower_representor *repr;
+
+	repr = dev->data->dev_private;
+	if (!nfp_flower_repr_is_phy(repr))
+		return -EOPNOTSUPP;
+
+	return nfp_net_led_off(dev);
+}
+
 static int
 nfp_flower_repr_link_update(struct rte_eth_dev *dev,
 		__rte_unused int wait_to_complete)
@@ -623,6 +647,9 @@ static const struct eth_dev_ops nfp_flower_multiple_pf_repr_dev_ops = {
 	.set_eeprom           = nfp_repr_set_eeprom,
 	.get_module_info      = nfp_repr_get_module_info,
 	.get_module_eeprom    = nfp_repr_get_module_eeprom,
+
+	.dev_led_on           = nfp_flower_repr_led_on,
+	.dev_led_off          = nfp_flower_repr_led_off,
 };
 
 static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
@@ -661,6 +688,9 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
 	.set_eeprom           = nfp_repr_set_eeprom,
 	.get_module_info      = nfp_repr_get_module_info,
 	.get_module_eeprom    = nfp_repr_get_module_eeprom,
+
+	.dev_led_on           = nfp_flower_repr_led_on,
+	.dev_led_off          = nfp_flower_repr_led_off,
 };
 
 static uint32_t
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 2ee76d309c..f54483822f 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -983,6 +983,8 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
 	.set_eeprom             = nfp_net_set_eeprom,
 	.get_module_info        = nfp_net_get_module_info,
 	.get_module_eeprom      = nfp_net_get_module_eeprom,
+	.dev_led_on             = nfp_net_led_on,
+	.dev_led_off            = nfp_net_led_off,
 };
 
 static inline void
diff --git a/drivers/net/nfp/nfp_net_common.c b/drivers/net/nfp/nfp_net_common.c
index a45837353a..e68ce68229 100644
--- a/drivers/net/nfp/nfp_net_common.c
+++ b/drivers/net/nfp/nfp_net_common.c
@@ -3181,3 +3181,35 @@ nfp_net_get_module_eeprom(struct rte_eth_dev *dev,
 	nfp_nsp_close(nsp);
 	return ret;
 }
+
+static int
+nfp_net_led_control(struct rte_eth_dev *dev,
+		bool is_on)
+{
+	int ret;
+	uint32_t nfp_idx;
+	struct nfp_net_hw_priv *hw_priv;
+
+	hw_priv = dev->process_private;
+	nfp_idx = nfp_net_get_nfp_index(dev);
+
+	ret = nfp_eth_set_idmode(hw_priv->pf_dev->cpp, nfp_idx, is_on);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Set nfp idmode failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+nfp_net_led_on(struct rte_eth_dev *dev)
+{
+	return nfp_net_led_control(dev, true);
+}
+
+int
+nfp_net_led_off(struct rte_eth_dev *dev)
+{
+	return nfp_net_led_control(dev, false);
+}
diff --git a/drivers/net/nfp/nfp_net_common.h b/drivers/net/nfp/nfp_net_common.h
index 5ad698cad2..d85a00a75e 100644
--- a/drivers/net/nfp/nfp_net_common.h
+++ b/drivers/net/nfp/nfp_net_common.h
@@ -399,6 +399,8 @@ int nfp_net_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eepr
 int nfp_net_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom);
 int nfp_net_get_module_info(struct rte_eth_dev *dev, struct rte_eth_dev_module_info *info);
 int nfp_net_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info);
+int nfp_net_led_on(struct rte_eth_dev *dev);
+int nfp_net_led_off(struct rte_eth_dev *dev);
 
 #define NFP_PRIV_TO_APP_FW_NIC(app_fw_priv)\
 	((struct nfp_app_fw_nic *)app_fw_priv)
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.h b/drivers/net/nfp/nfpcore/nfp_nsp.h
index 0ae10dabfb..6230a84e34 100644
--- a/drivers/net/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.h
@@ -216,6 +216,7 @@ int nfp_eth_set_speed(struct nfp_nsp *nsp, uint32_t speed);
 int nfp_eth_set_split(struct nfp_nsp *nsp, uint32_t lanes);
 int nfp_eth_set_tx_pause(struct nfp_nsp *nsp, bool tx_pause);
 int nfp_eth_set_rx_pause(struct nfp_nsp *nsp, bool rx_pause);
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, uint32_t idx, bool is_on);
 
 /* NSP static information */
 struct nfp_nsp_identify {
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
index 1fcd54656a..404690d05f 100644
--- a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
@@ -44,6 +44,7 @@
 #define NSP_ETH_CTRL_SET_LANES          RTE_BIT64(5)
 #define NSP_ETH_CTRL_SET_ANEG           RTE_BIT64(6)
 #define NSP_ETH_CTRL_SET_FEC            RTE_BIT64(7)
+#define NSP_ETH_CTRL_SET_IDMODE         RTE_BIT64(8)
 #define NSP_ETH_CTRL_SET_TX_PAUSE       RTE_BIT64(10)
 #define NSP_ETH_CTRL_SET_RX_PAUSE       RTE_BIT64(11)
 
@@ -736,3 +737,38 @@ nfp_eth_set_rx_pause(struct nfp_nsp *nsp,
 	return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
 			NSP_ETH_STATE_RX_PAUSE, rx_pause, NSP_ETH_CTRL_SET_RX_PAUSE);
 }
+
+int
+nfp_eth_set_idmode(struct nfp_cpp *cpp,
+		uint32_t idx,
+		bool is_on)
+{
+	uint64_t reg;
+	struct nfp_nsp *nsp;
+	union eth_table_entry *entries;
+
+	nsp = nfp_eth_config_start(cpp, idx);
+	if (nsp == NULL)
+		return -EIO;
+
+	/*
+	 * Older ABI versions did support this feature, however this has only
+	 * been reliable since ABI 32.
+	 */
+	if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
+		PMD_DRV_LOG(ERR, "Operation only supported on ABI 32 or newer.");
+		nfp_eth_config_cleanup_end(nsp);
+		return -ENOTSUP;
+	}
+
+	entries = nfp_nsp_config_entries(nsp);
+
+	reg = rte_le_to_cpu_64(entries[idx].control);
+	reg &= ~NSP_ETH_CTRL_SET_IDMODE;
+	reg |= FIELD_PREP(NSP_ETH_CTRL_SET_IDMODE, is_on);
+	entries[idx].control = rte_cpu_to_le_64(reg);
+
+	nfp_nsp_config_set_modified(nsp, 1);
+
+	return nfp_eth_config_commit_end(nsp);
+}
-- 
2.43.5


^ permalink raw reply	[relevance 6%]

* Re: [PATCH v14 0/3] power: introduce PM QoS interface
  2024-10-29 13:28  4% ` [PATCH v14 0/3] power: introduce PM QoS interface Huisong Li
  2024-10-29 13:28  5%   ` [PATCH v14 1/3] power: introduce PM QoS API on CPU wide Huisong Li
@ 2024-11-04  9:13  0%   ` lihuisong (C)
  1 sibling, 0 replies; 169+ results
From: lihuisong (C) @ 2024-11-04  9:13 UTC (permalink / raw)
  To: dev, ferruh.yigit, thomas
  Cc: mb, anatoly.burakov, david.hunt, sivaprasad.tummala, stephen,
	konstantin.ananyev, david.marchand, fengchengwen, liuyonglong

Hi Ferruh and Thomas,

Kindly ping for merge.


On 2024/10/29 21:28, Huisong Li wrote:
> The deeper the idle state, the lower the power consumption, but the longer
> the resume time. Some services are delay sensitive and really expect a low
> resume time, like interrupt packet receiving mode.
>
> And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
> interface is used to set and get the resume latency limit on the cpuX for
> userspace. Please see the description in kernel document[1].
> Each cpuidle governor in Linux select which idle state to enter based on
> this CPU resume latency in their idle task.
>
> The per-CPU PM QoS API can be used to control this CPU's idle state
> selection and limit it to entering only the shallowest idle state, lowering
> the delay when waking from an idle state, by setting a strict resume
> latency (zero value).
>
> [1] https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
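
For anyone trying this out, a minimal sketch of the intended usage in an
application (assuming the rte_power_qos API and macro names from this
series):

	/* Requires <rte_power_qos.h>. Before enabling Rx interrupt mode,
	 * request a strict (zero) resume latency so the CPU behind this
	 * lcore only enters the shallowest idle state. */
	int ret = rte_power_qos_set_cpu_resume_latency(lcore_id,
			RTE_POWER_QOS_STRICT_LATENCY_VALUE);

	/* On teardown, restore the default, i.e. no latency limit. */
	ret = rte_power_qos_set_cpu_resume_latency(lcore_id,
			RTE_POWER_QOS_RESUME_LATENCY_NO_LIMIT);

The same limit can also be set for a quick check from the shell, e.g.
"echo 0 > /sys/devices/system/cpu/cpu2/power/pm_qos_resume_latency_us".
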
>
> ---
>   v14:
>    - use parse_uint to parse --cpu-resume-latency instead of adding a new
>      parse_int()
>   v13:
>    - not allow negative value for --cpu-resume-latency.
>    - restore to the original value as Konstantin suggested.
>   v12:
>    - add Acked-by Chengwen and Konstantin
>    - fix overflow issue in l3fwd-power when parse command line
>    - add a command parameter to set CPU resume latency
>   v11:
>    - operate the cpu id the lcore mapped by the new function
>      power_get_lcore_mapped_cpu_id().
>   v10:
>    - replace LINE_MAX with a custom macro and fix two typos.
>   v9:
>    - move new feature description from release_24_07.rst to release_24_11.rst.
>   v8:
>    - update the latest code to resolve CI warning
>   v7:
>    - remove a dead code rte_lcore_is_enabled in patch[2/2]
>   v6:
>    - update release_24_07.rst based on dpdk repo to resolve CI warning.
>   v5:
>    - use LINE_MAX to replace BUFSIZ, and use snprintf to replace sprintf.
>   v4:
>    - fix some comments basd on Stephen
>    - add stdint.h include
>    - add Acked-by Morten Brørup <mb@smartsharesystems.com>
>   v3:
>    - add RTE_POWER_xxx prefix for some macro in header
>    - add the check for lcore_id with rte_lcore_is_enabled
>   v2:
>    - use PM QoS on CPU wide to replace the one on system wide
>
>
> Huisong Li (3):
>    power: introduce PM QoS API on CPU wide
>    examples/l3fwd-power: fix data overflow when parse command line
>    examples/l3fwd-power: add PM QoS configuration
>
>   doc/guides/prog_guide/power_man.rst           |  19 +++
>   doc/guides/rel_notes/release_24_11.rst        |   5 +
>   .../sample_app_ug/l3_forward_power_man.rst    |   5 +-
>   examples/l3fwd-power/main.c                   |  96 +++++++++++---
>   lib/power/meson.build                         |   2 +
>   lib/power/rte_power_qos.c                     | 123 ++++++++++++++++++
>   lib/power/rte_power_qos.h                     |  73 +++++++++++
>   lib/power/version.map                         |   4 +
>   8 files changed, 306 insertions(+), 21 deletions(-)
>   create mode 100644 lib/power/rte_power_qos.c
>   create mode 100644 lib/power/rte_power_qos.h
>

^ permalink raw reply	[relevance 0%]

* [PATCH v8 0/3] add ec points to sm2 op
@ 2024-11-04  9:36  3% Arkadiusz Kusztal
  2024-11-04  9:36  5% ` [PATCH v8 1/3] cryptodev: " Arkadiusz Kusztal
  0 siblings, 1 reply; 169+ results
From: Arkadiusz Kusztal @ 2024-11-04  9:36 UTC (permalink / raw)
  To: dev; +Cc: gakhil, brian.dooley, Arkadiusz Kusztal

In the case when the PMD cannot support the full SM2 process,
but only the elliptic curve computation, additional fields
are needed to handle such a case.

v2:
- rebased against the 24.11 code
v3:
- added feature flag
- added QAT patches
- added test patches
v4:
- replaced feature flag with capability
- split API patches
v5:
- rebased
- clarified usage of the partial flag
v6:
- removed already applied patch 1
- added ABI relase notes comment
- removed camel case
- added flag reference
v7:
- removed SM2 from auth features, in asym it was added in SM2 ECDSA patch
v8:
- fixed an openssl test issue
- added the partial_flag to QAT capabilities

Arkadiusz Kusztal (3):
  cryptodev: add ec points to sm2 op
  crypto/qat: add sm2 encryption/decryption function
  app/test: add test sm2 C1/Kp test cases

 app/test/test_cryptodev_asym.c                | 134 +++++++++++++++++
 app/test/test_cryptodev_sm2_test_vectors.h    | 112 +++++++++++++-
 doc/guides/rel_notes/release_24_11.rst        |   7 +
 .../common/qat/qat_adf/icp_qat_fw_mmp_ids.h   |   3 +
 drivers/common/qat/qat_adf/qat_pke.h          |  20 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c  |  72 ++++++++-
 drivers/crypto/qat/qat_asym.c                 | 140 +++++++++++++++++-
 lib/cryptodev/rte_crypto_asym.h               |  56 +++++--
 8 files changed, 520 insertions(+), 24 deletions(-)

-- 
2.34.1


^ permalink raw reply	[relevance 3%]

* [PATCH v8 1/3] cryptodev: add ec points to sm2 op
  2024-11-04  9:36  3% [PATCH v8 0/3] add ec points to sm2 op Arkadiusz Kusztal
@ 2024-11-04  9:36  5% ` Arkadiusz Kusztal
  2024-11-06 10:08  0%   ` [EXTERNAL] " Akhil Goyal
  0 siblings, 1 reply; 169+ results
From: Arkadiusz Kusztal @ 2024-11-04  9:36 UTC (permalink / raw)
  To: dev; +Cc: gakhil, brian.dooley, Arkadiusz Kusztal

In the case when the PMD cannot support the full SM2 process,
but only the elliptic curve computation, additional fields
are needed to handle such a case.

Points C1 and kP were therefore added to the SM2 crypto operation struct.

Signed-off-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
---
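For illustration, a rough sketch of the partial encryption case, where the
PMD computes only the elliptic curve points and the application derives the
ciphertext itself (the names c1_x, c1_y, x2, y2 and curve_n are hypothetical
application variables):

	/* Assumes an asym session configured for SM2 on a PMD advertising
	 * the RTE_CRYPTO_SM2_PARTIAL capability. */
	struct rte_crypto_sm2_op_param *sm2 = &op->asym->sm2;

	sm2->op_type = RTE_CRYPTO_ASYM_OP_ENCRYPT;
	/* Outputs: the PMD writes C1 = (x1,y1) and [k]P = (x2,y2) into
	 * these application-supplied buffers. */
	sm2->c1.x.data = c1_x;  sm2->c1.x.length = curve_n;
	sm2->c1.y.data = c1_y;  sm2->c1.y.length = curve_n;
	sm2->kp.x.data = x2;    sm2->kp.x.length = curve_n;
	sm2->kp.y.data = y2;    sm2->kp.y.length = curve_n;

	/* The application then computes t = KDF(x2 || y2), C2 = M xor t
	 * and C3 on its own, per the SM2 specification. */
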
 doc/guides/rel_notes/release_24_11.rst |  3 ++
 lib/cryptodev/rte_crypto_asym.h        | 56 +++++++++++++++++++-------
 2 files changed, 45 insertions(+), 14 deletions(-)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 53a5ffebe5..ee9e2cea3c 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -413,6 +413,9 @@ ABI Changes
   added new structure ``rte_node_xstats`` to ``rte_node_register`` and
   added ``xstat_off`` to ``rte_node``.
 
+* cryptodev: The ``rte_crypto_sm2_op_param`` struct member to hold ciphertext
+  is changed to union data type. This change is to support partial SM2 calculation.
+
 
 Known Issues
 ------------
diff --git a/lib/cryptodev/rte_crypto_asym.h b/lib/cryptodev/rte_crypto_asym.h
index aeb46e688e..f095cebcd0 100644
--- a/lib/cryptodev/rte_crypto_asym.h
+++ b/lib/cryptodev/rte_crypto_asym.h
@@ -646,6 +646,8 @@ enum rte_crypto_sm2_op_capa {
 	/**< Random number generator supported in SM2 ops. */
 	RTE_CRYPTO_SM2_PH,
 	/**< Prehash message before crypto op. */
+	RTE_CRYPTO_SM2_PARTIAL,
+	/**< Calculate elliptic curve points only. */
 };
 
 /**
@@ -673,20 +675,46 @@ struct rte_crypto_sm2_op_param {
 	 * will be overwritten by the PMD with the decrypted length.
 	 */
 
-	rte_crypto_param cipher;
-	/**<
-	 * Pointer to input data
-	 * - to be decrypted for SM2 private decrypt.
-	 *
-	 * Pointer to output data
-	 * - for SM2 public encrypt.
-	 * In this case the underlying array should have been allocated
-	 * with enough memory to hold ciphertext output (at least X bytes
-	 * for prime field curve of N bytes and for message M bytes,
-	 * where X = (C1 || C2 || C3) and computed based on SM2 RFC as
-	 * C1 (1 + N + N), C2 = M, C3 = N. The cipher.length field will
-	 * be overwritten by the PMD with the encrypted length.
-	 */
+	union {
+		rte_crypto_param cipher;
+		/**<
+		 * Pointer to input data
+		 * - to be decrypted for SM2 private decrypt.
+		 *
+		 * Pointer to output data
+		 * - for SM2 public encrypt.
+		 * In this case the underlying array should have been allocated
+		 * with enough memory to hold ciphertext output (at least X bytes
+		 * for prime field curve of N bytes and for message M bytes,
+		 * where X = (C1 || C2 || C3) and computed based on SM2 RFC as
+		 * C1 (1 + N + N), C2 = M, C3 = N. The cipher.length field will
+		 * be overwritten by the PMD with the encrypted length.
+		 */
+		struct {
+			struct rte_crypto_ec_point c1;
+			/**<
+			 * This field is used only when PMD does not support the full
+			 * process of the SM2 encryption/decryption, but the elliptic
+			 * curve part only.
+			 *
+			 * In the case of encryption, it is an output - point C1 = (x1,y1).
+			 * In the case of decryption, if is an input - point C1 = (x1,y1).
+			 *
+			 * Must be used along with the RTE_CRYPTO_SM2_PARTIAL flag.
+			 */
+			struct rte_crypto_ec_point kp;
+			/**<
+			 * This field is used only when PMD does not support the full
+			 * process of the SM2 encryption/decryption, but the elliptic
+			 * curve part only.
+			 *
+			 * It is an output in the encryption case, it is a point
+			 * [k]P = (x2,y2).
+			 *
+			 * Must be used along with the RTE_CRYPTO_SM2_PARTIAL flag.
+			 */
+		};
+	};
 
 	rte_crypto_uint id;
 	/**< The SM2 id used by signer and verifier. */
-- 
2.34.1


^ permalink raw reply	[relevance 5%]

* RE: [EXTERNAL] [PATCH v8 1/3] cryptodev: add ec points to sm2 op
  2024-11-04  9:36  5% ` [PATCH v8 1/3] cryptodev: " Arkadiusz Kusztal
@ 2024-11-06 10:08  0%   ` Akhil Goyal
  2024-11-06 15:17  0%     ` Kusztal, ArkadiuszX
  0 siblings, 1 reply; 169+ results
From: Akhil Goyal @ 2024-11-06 10:08 UTC (permalink / raw)
  To: Arkadiusz Kusztal, dev; +Cc: brian.dooley

> In the case when PMD cannot support the full process of the SM2,
> but elliptic curve computation only, additional fields
> are needed to handle such a case.
> 
> Points C1, kP therefore were added to the SM2 crypto operation struct.
> 
> Signed-off-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
> ---

Please rebase; the CI failed to apply the patch.
Please be proactive in fixing CI issues if reported.

>  doc/guides/rel_notes/release_24_11.rst |  3 ++
>  lib/cryptodev/rte_crypto_asym.h        | 56 +++++++++++++++++++-------
>  2 files changed, 45 insertions(+), 14 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/release_24_11.rst
> b/doc/guides/rel_notes/release_24_11.rst
> index 53a5ffebe5..ee9e2cea3c 100644
> --- a/doc/guides/rel_notes/release_24_11.rst
> +++ b/doc/guides/rel_notes/release_24_11.rst
> @@ -413,6 +413,9 @@ ABI Changes
>    added new structure ``rte_node_xstats`` to ``rte_node_register`` and
>    added ``xstat_off`` to ``rte_node``.
> 
> +* cryptodev: The ``rte_crypto_sm2_op_param`` struct member to hold
> ciphertext
> +  is changed to union data type. This change is to support partial SM2 calculation.
> +
> 
>  Known Issues
>  ------------
> diff --git a/lib/cryptodev/rte_crypto_asym.h b/lib/cryptodev/rte_crypto_asym.h
> index aeb46e688e..f095cebcd0 100644
> --- a/lib/cryptodev/rte_crypto_asym.h
> +++ b/lib/cryptodev/rte_crypto_asym.h
> @@ -646,6 +646,8 @@ enum rte_crypto_sm2_op_capa {
>  	/**< Random number generator supported in SM2 ops. */
>  	RTE_CRYPTO_SM2_PH,
>  	/**< Prehash message before crypto op. */
> +	RTE_CRYPTO_SM2_PARTIAL,
> +	/**< Calculate elliptic curve points only. */
>  };
> 
>  /**
> @@ -673,20 +675,46 @@ struct rte_crypto_sm2_op_param {
>  	 * will be overwritten by the PMD with the decrypted length.
>  	 */
> 
> -	rte_crypto_param cipher;
> -	/**<
> -	 * Pointer to input data
> -	 * - to be decrypted for SM2 private decrypt.
> -	 *
> -	 * Pointer to output data
> -	 * - for SM2 public encrypt.
> -	 * In this case the underlying array should have been allocated
> -	 * with enough memory to hold ciphertext output (at least X bytes
> -	 * for prime field curve of N bytes and for message M bytes,
> -	 * where X = (C1 || C2 || C3) and computed based on SM2 RFC as
> -	 * C1 (1 + N + N), C2 = M, C3 = N. The cipher.length field will
> -	 * be overwritten by the PMD with the encrypted length.
> -	 */
> +	union {
> +		rte_crypto_param cipher;
> +		/**<
> +		 * Pointer to input data
> +		 * - to be decrypted for SM2 private decrypt.
> +		 *
> +		 * Pointer to output data
> +		 * - for SM2 public encrypt.
> +		 * In this case the underlying array should have been allocated
> +		 * with enough memory to hold ciphertext output (at least X
> bytes
> +		 * for prime field curve of N bytes and for message M bytes,
> +		 * where X = (C1 || C2 || C3) and computed based on SM2 RFC
> as
> +		 * C1 (1 + N + N), C2 = M, C3 = N. The cipher.length field will
> +		 * be overwritten by the PMD with the encrypted length.
> +		 */
> +		struct {
> +			struct rte_crypto_ec_point c1;
> +			/**<
> +			 * This field is used only when PMD does not support the
> full
> +			 * process of the SM2 encryption/decryption, but the
> elliptic
> +			 * curve part only.
> +			 *
> +			 * In the case of encryption, it is an output - point C1 =
> (x1,y1).
> +			 * In the case of decryption, if is an input - point C1 =
> (x1,y1).
> +			 *
> +			 * Must be used along with the
> RTE_CRYPTO_SM2_PARTIAL flag.
> +			 */
> +			struct rte_crypto_ec_point kp;
> +			/**<
> +			 * This field is used only when PMD does not support the
> full
> +			 * process of the SM2 encryption/decryption, but the
> elliptic
> +			 * curve part only.
> +			 *
> +			 * It is an output in the encryption case, it is a point
> +			 * [k]P = (x2,y2).
> +			 *
> +			 * Must be used along with the
> RTE_CRYPTO_SM2_PARTIAL flag.
> +			 */
> +		};
> +	};
> 
>  	rte_crypto_uint id;
>  	/**< The SM2 id used by signer and verifier. */
> --
> 2.34.1


^ permalink raw reply	[relevance 0%]

* RE: [EXTERNAL] [PATCH v8 1/3] cryptodev: add ec points to sm2 op
  2024-11-06 10:08  0%   ` [EXTERNAL] " Akhil Goyal
@ 2024-11-06 15:17  0%     ` Kusztal, ArkadiuszX
  0 siblings, 0 replies; 169+ results
From: Kusztal, ArkadiuszX @ 2024-11-06 15:17 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: Dooley, Brian

Hi Akhil,

> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Wednesday, November 6, 2024 11:09 AM
> To: Kusztal, ArkadiuszX <arkadiuszx.kusztal@intel.com>; dev@dpdk.org
> Cc: Dooley, Brian <brian.dooley@intel.com>
> Subject: RE: [EXTERNAL] [PATCH v8 1/3] cryptodev: add ec points to sm2 op
> 
> > In the case when PMD cannot support the full process of the SM2, but
> > elliptic curve computation only, additional fields are needed to
> > handle such a case.
> >
> > Points C1, kP therefore were added to the SM2 crypto operation struct.
> >
> > Signed-off-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
> > ---
> 
> Please rebase. CI failed to apply patch.
> Please be proactive to fix CI issues if reported.

I have deferred the whole patchset, no further action is necessary.

> 
> >  doc/guides/rel_notes/release_24_11.rst |  3 ++
> >  lib/cryptodev/rte_crypto_asym.h        | 56 +++++++++++++++++++-------
> >  2 files changed, 45 insertions(+), 14 deletions(-)
> >
> > diff --git a/doc/guides/rel_notes/release_24_11.rst
> > b/doc/guides/rel_notes/release_24_11.rst
> > index 53a5ffebe5..ee9e2cea3c 100644
> > --- a/doc/guides/rel_notes/release_24_11.rst
> > +++ b/doc/guides/rel_notes/release_24_11.rst
> > @@ -413,6 +413,9 @@ ABI Changes
> >    added new structure ``rte_node_xstats`` to ``rte_node_register`` and
> >    added ``xstat_off`` to ``rte_node``.
> >
> > +* cryptodev: The ``rte_crypto_sm2_op_param`` struct member to hold
> > ciphertext
> > +  is changed to union data type. This change is to support partial SM2
> calculation.
> > +
> >
> >  Known Issues
> >  ------------
> > diff --git a/lib/cryptodev/rte_crypto_asym.h
> > b/lib/cryptodev/rte_crypto_asym.h index aeb46e688e..f095cebcd0 100644
> > --- a/lib/cryptodev/rte_crypto_asym.h
> > +++ b/lib/cryptodev/rte_crypto_asym.h
> > @@ -646,6 +646,8 @@ enum rte_crypto_sm2_op_capa {
> >  	/**< Random number generator supported in SM2 ops. */
> >  	RTE_CRYPTO_SM2_PH,
> >  	/**< Prehash message before crypto op. */
> > +	RTE_CRYPTO_SM2_PARTIAL,
> > +	/**< Calculate elliptic curve points only. */
> >  };
> >
> >  /**
> > @@ -673,20 +675,46 @@ struct rte_crypto_sm2_op_param {
> >  	 * will be overwritten by the PMD with the decrypted length.
> >  	 */
> >
> > -	rte_crypto_param cipher;
> > -	/**<
> > -	 * Pointer to input data
> > -	 * - to be decrypted for SM2 private decrypt.
> > -	 *
> > -	 * Pointer to output data
> > -	 * - for SM2 public encrypt.
> > -	 * In this case the underlying array should have been allocated
> > -	 * with enough memory to hold ciphertext output (at least X bytes
> > -	 * for prime field curve of N bytes and for message M bytes,
> > -	 * where X = (C1 || C2 || C3) and computed based on SM2 RFC as
> > -	 * C1 (1 + N + N), C2 = M, C3 = N. The cipher.length field will
> > -	 * be overwritten by the PMD with the encrypted length.
> > -	 */
> > +	union {
> > +		rte_crypto_param cipher;
> > +		/**<
> > +		 * Pointer to input data
> > +		 * - to be decrypted for SM2 private decrypt.
> > +		 *
> > +		 * Pointer to output data
> > +		 * - for SM2 public encrypt.
> > +		 * In this case the underlying array should have been allocated
> > +		 * with enough memory to hold ciphertext output (at least X
> > bytes
> > +		 * for prime field curve of N bytes and for message M bytes,
> > +		 * where X = (C1 || C2 || C3) and computed based on SM2 RFC
> > as
> > +		 * C1 (1 + N + N), C2 = M, C3 = N. The cipher.length field will
> > +		 * be overwritten by the PMD with the encrypted length.
> > +		 */
> > +		struct {
> > +			struct rte_crypto_ec_point c1;
> > +			/**<
> > +			 * This field is used only when PMD does not support
> the
> > full
> > +			 * process of the SM2 encryption/decryption, but the
> > elliptic
> > +			 * curve part only.
> > +			 *
> > +			 * In the case of encryption, it is an output - point C1 =
> > (x1,y1).
> > +			 * In the case of decryption, if is an input - point C1 =
> > (x1,y1).
> > +			 *
> > +			 * Must be used along with the
> > RTE_CRYPTO_SM2_PARTIAL flag.
> > +			 */
> > +			struct rte_crypto_ec_point kp;
> > +			/**<
> > +			 * This field is used only when PMD does not support
> the
> > full
> > +			 * process of the SM2 encryption/decryption, but the
> > elliptic
> > +			 * curve part only.
> > +			 *
> > +			 * It is an output in the encryption case, it is a point
> > +			 * [k]P = (x2,y2).
> > +			 *
> > +			 * Must be used along with the
> > RTE_CRYPTO_SM2_PARTIAL flag.
> > +			 */
> > +		};
> > +	};
> >
> >  	rte_crypto_uint id;
> >  	/**< The SM2 id used by signer and verifier. */
> > --
> > 2.34.1


^ permalink raw reply	[relevance 0%]

* RE: [EXTERNAL] [PATCH] graph: optimize graph search when scheduling nodes
  @ 2024-11-07  9:37  3% ` Jerin Jacob
  2024-11-08  1:39  4%   ` Huichao Cai
    1 sibling, 1 reply; 169+ results
From: Jerin Jacob @ 2024-11-07  9:37 UTC (permalink / raw)
  To: Huichao cai, Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram,
	yanzhirun_163
  Cc: dev


> -----Original Message-----
> From: Huichao cai <chcchc88@163.com>
> Sent: Thursday, November 7, 2024 1:35 PM
> To: Jerin Jacob <jerinj@marvell.com>; Kiran Kumar Kokkilagadda
> <kirankumark@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>; yanzhirun_163@163.com
> Cc: dev@dpdk.org
> Subject: [EXTERNAL] [PATCH] graph: optimize graph search when scheduling
> nodes
> 
> In the function __rte_graph_mcore_dispatch_sched_node_enqueue,
> a slow loop is used to search for the graph. Modify the search logic to
> record the result of the first search, and use this record for subsequent
> searches to improve search speed.
> 
> Signed-off-by: Huichao cai <chcchc88@163.com>
> ---
>  	return graph != NULL ? __graph_sched_node_enqueue(node, graph) : false;
>  }
> diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
> index a518af2..4c2432b 100644
> --- a/lib/graph/rte_graph_worker_common.h
> +++ b/lib/graph/rte_graph_worker_common.h
> @@ -110,6 +110,7 @@ struct __rte_cache_aligned rte_node {
>  			unsigned int lcore_id;  /**< Node running lcore. */
>  			uint64_t total_sched_objs; /**< Number of objects scheduled. */
>  			uint64_t total_sched_fail; /**< Number of scheduled failure. */
> +			struct rte_graph *graph;  /**< Graph corresponding to lcore_id. */

Isn't this breaking the ABI?

Also, please change the commit title as follows for mcore-specific changes:

graph: mcore: ...

>  		} dispatch;
>  	};
>  	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
> --
> 1.8.3.1


^ permalink raw reply	[relevance 3%]
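
A minimal sketch of the caching idea under discussion, for context (the field
and flow follow the v2 patch further down this page; an illustration, not the
exact v1 hunk):

	/* before: walk the run queue on every scheduled enqueue */
	SLIST_FOREACH(graph, rq, next)
		if (graph->dispatch.lcore_id == lcore_id)
			break;

	/* after: remember the graph found by the first search */
	struct rte_graph *graph = node->dispatch.graph;

	if (unlikely(graph == NULL || graph->dispatch.lcore_id != lcore_id)) {
		SLIST_FOREACH(graph, rq, next)
			if (graph->dispatch.lcore_id == lcore_id)
				break;
		node->dispatch.graph = graph;	/* cache for subsequent calls */
	}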

* Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search when scheduling nodes
  2024-11-07  9:37  3% ` [EXTERNAL] " Jerin Jacob
@ 2024-11-08  1:39  4%   ` Huichao Cai
  2024-11-08 12:22  3%     ` Jerin Jacob
  0 siblings, 1 reply; 169+ results
From: Huichao Cai @ 2024-11-08  1:39 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram, yanzhirun_163, dev

[-- Attachment #1: Type: text/plain, Size: 117 bytes --]

> Isn't this breaking the ABI?

So can't we modify the ABI, or is there any special operation required to modify the ABI?

[-- Attachment #2: Type: text/html, Size: 490 bytes --]

^ permalink raw reply	[relevance 4%]

* RE: Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search when scheduling nodes
  2024-11-08  1:39  4%   ` Huichao Cai
@ 2024-11-08 12:22  3%     ` Jerin Jacob
  2024-11-08 13:38  0%       ` David Marchand
  0 siblings, 1 reply; 169+ results
From: Jerin Jacob @ 2024-11-08 12:22 UTC (permalink / raw)
  To: Huichao Cai
  Cc: Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram, yanzhirun_163,
	dev, Thomas Monjalon, david.marchand, Robin Jarry



> -----Original Message-----
> From: Huichao Cai <chcchc88@163.com>
> Sent: Friday, November 8, 2024 7:10 AM
> To: Jerin Jacob <jerinj@marvell.com>
> Cc: Kiran Kumar Kokkilagadda <kirankumark@marvell.com>; Nithin Kumar
> Dabilpuram <ndabilpuram@marvell.com>; yanzhirun_163@163.com;
> dev@dpdk.org
> Subject: Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search when
> scheduling nodes
> 
> > Isn't this breaking the ABI?
> 
> So can't we modify the ABI, or is there any special operation required to modify
> the ABI?

Only an LTS release (xx.11) can change the ABI, after sending a deprecation notice.
Looking at the pahole output, one option would be to make the dispatch area
and new semi-fastpath additions like xstat_off min-cache-aligned, to make
room for future expansion and to ensure better performance.

For the xstat_off addition, there was a deprecation notice to update rte_node.
If there are no objections, maybe we can try the following in this release so
Huichao does not have to wait one more year.


[main] [dpdk.org] $ git diff
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index a518af2b2a..ec9a82186d 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -104,6 +104,7 @@ struct __rte_cache_aligned rte_node {
        /** Original process function when pcap is enabled. */
        rte_node_process_t original_process;

+       alignas(RTE_CACHE_LINE_MIN_SIZE)
        union {
                /* Fast schedule area for mcore dispatch model */
                struct {
@@ -112,6 +113,7 @@ struct __rte_cache_aligned rte_node {
                        uint64_t total_sched_fail; /**< Number of scheduled failure. */
                } dispatch;
        };
+       alignas(RTE_CACHE_LINE_MIN_SIZE)
        rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
        /* Fast path area  */
        __extension__ struct __rte_cache_aligned {

^ permalink raw reply	[relevance 3%]

* Re: Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search when scheduling nodes
  2024-11-08 12:22  3%     ` Jerin Jacob
@ 2024-11-08 13:38  0%       ` David Marchand
  2024-11-11  5:38  0%         ` Jerin Jacob
  0 siblings, 1 reply; 169+ results
From: David Marchand @ 2024-11-08 13:38 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Huichao Cai, Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram,
	yanzhirun_163, dev, Thomas Monjalon, Robin Jarry

Hello Jerin,

On Fri, Nov 8, 2024 at 1:22 PM Jerin Jacob <jerinj@marvell.com> wrote:
> > > Isn't this breaking the ABI?
> >
> > So can't we modify the ABI, or is there any special operation required to modify
> > the ABI?
>
> Only an LTS release (xx.11) can change the ABI, after sending a deprecation notice.
> Looking at the pahole output, one option would be to make the dispatch area
> and new semi-fastpath additions like xstat_off min-cache-aligned, to make
> room for future expansion and to ensure better performance.

Adding holes may be a short term solution, but in my opinion, the slow
path part should be entirely hidden and we only expose the fp part.
Reminder, those holes must be in a "known state" as we release v24.11
so that the presence of future additions can be safely detected.


-- 
David Marchand


^ permalink raw reply	[relevance 0%]
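
For illustration, the "known state" requirement boils down to this: if the
graph library zeroes each rte_node when populating it, a pointer field added
into the reserved space starts out NULL, so later code can probe whether it
was ever set (a sketch under that zero-initialization assumption;
slow_list_search() is a hypothetical helper):

	/* NULL means nothing has populated the new field yet, e.g. a freshly
	 * zeroed allocation, so fall back to the slow list search */
	if (node->dispatch.graph == NULL)
		graph = slow_list_search(rq, lcore_id);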

* RE: [PATCH] config: limit lcore variable maximum size to 4k
  @ 2024-11-08 22:49  3%       ` Morten Brørup
  0 siblings, 0 replies; 169+ results
From: Morten Brørup @ 2024-11-08 22:49 UTC (permalink / raw)
  To: Thomas Monjalon, David Marchand, Mattias Rönnblom
  Cc: dev, Bruce Richardson, Stephen Hemminger, Chengwen Feng,
	Konstantin Ananyev

> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Friday, 8 November 2024 23.13

> Let's consider it based on the need.
> The lcore variables are new and we don't want them to degrade
> the DPDK footprint, at least not in this first version.
> 4 KB is a memory page on common systems,
> it looks reasonable and big enough for a "variable".
> 
> Applied, thanks.

Changing this breaks the API/ABI.

I consider the 4 KB patch a temporary fix, only to make progress on the release candidate, but not the value to go into the final LTS release.
In other words: I formally NAK this patch for the LTS release, but ACK it for the release candidate.

Let's postpone the discussion until after the release candidate.


^ permalink raw reply	[relevance 3%]

* Re: [PATCH 0/2] gpudev: annotate memory allocation
  @ 2024-11-09  0:22  3% ` Stephen Hemminger
  0 siblings, 0 replies; 169+ results
From: Stephen Hemminger @ 2024-11-09  0:22 UTC (permalink / raw)
  To: Elena Agostini; +Cc: dev

On Thu, 17 Oct 2024 15:58:02 -0700
Stephen Hemminger <stephen@networkplumber.org> wrote:

> Use function attributes to catch misuse of GPU memory
> at compile time.
> 
> Stephen Hemminger (2):
>   test-gpudev: avoid use-after-free and free-non-heap warnings
>   gpudev: add malloc annotations to rte_gpu_mem_alloc
> 
>  app/test-gpudev/main.c  | 10 ++++++++-
>  lib/gpudev/rte_gpudev.h | 46 +++++++++++++++++++++--------------------
>  2 files changed, 33 insertions(+), 23 deletions(-)
> 


The problem is that the include checker can't handle this.
####################################################################################
#### [Begin job log] "ubuntu-22.04-gcc-debug+doc+examples+tests" at step Build and test
####################################################################################
                 from buildtools/chkincs/chkincs.p/gpudev_driver.c:1:
/home/runner/work/dpdk/dpdk/lib/gpudev/rte_gpudev.h:411:9: error: ‘rte_gpu_mem_free’ is deprecated: Symbol is not yet part of stable ABI [-Werror=deprecated-declarations]
  411 |         __rte_malloc __rte_dealloc(rte_gpu_mem_free, 2);
      |         ^~~~~~~~~~~~
/home/runner/work/dpdk/dpdk/lib/gpudev/rte_gpudev.h:380:5: note: declared here
  380 | int rte_gpu_mem_free(int16_t dev_id, void *ptr);
      |     ^~~~~~~~~~~~~~~~

Either the include checker needs to be able to handle experimental symbols,
or maybe it is time for gpudev to be moved out of experimental status for 25.03?


^ permalink raw reply	[relevance 3%]
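
For readers unfamiliar with the annotation under discussion, a standalone
sketch of the GCC allocator/deallocator pairing (GCC 11 or newer; my_alloc()
and my_free() are hypothetical stand-ins for what the rte_gpudev markers
presumably expand to):

	#include <stdlib.h>

	void my_free(void *p);

	/* tie the allocator to its matching deallocator so the compiler can
	 * flag misuse at the call sites */
	__attribute__((malloc, malloc(my_free, 1)))
	void *my_alloc(size_t sz);

	void use(void)
	{
		void *p = my_alloc(64);
		free(p);	/* -Wmismatched-dealloc: free() is not my_free() */
	}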

* [PATCH v15 0/3] power: introduce PM QoS interface
                     ` (4 preceding siblings ...)
  2024-10-29 13:28  4% ` [PATCH v14 0/3] power: introduce PM QoS interface Huisong Li
@ 2024-11-11  2:25  4% ` Huisong Li
  2024-11-11  2:25  5%   ` [PATCH v15 1/3] power: introduce PM QoS API on CPU wide Huisong Li
  2024-11-11 10:29  0%   ` [PATCH v15 0/3] power: introduce PM QoS interface Thomas Monjalon
  2024-11-11  9:14  4% ` [RESEND PATCH " Huisong Li
  6 siblings, 2 replies; 169+ results
From: Huisong Li @ 2024-11-11  2:25 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay sensitive and request a low
resume time, like interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used to set and get the resume latency limit on the cpuX for
userspace. Please see the description in kernel document[1].
Each cpuidle governor in Linux selects which idle state to enter based on
this CPU resume latency in its idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection and limit it to entering just the shallowest idle state, to lower
the delay when waking up from an idle state, by setting a strict resume
latency (zero value).

[1] https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
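
As an illustration of the sysfs semantics above (cpu2 is an arbitrary example
core; per the kernel interface, writing "n/a" requests the strict zero resume
latency and writing "0" removes the constraint):

	#include <stdio.h>

	/* minimal sketch: write a resume latency string for cpu2 */
	static int
	cpu2_set_resume_latency(const char *val) /* "n/a", "0" or a value in us */
	{
		FILE *f = fopen("/sys/devices/system/cpu/cpu2/power/"
				"pm_qos_resume_latency_us", "w");

		if (f == NULL)
			return -1;
		fputs(val, f);
		return fclose(f);
	}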

---
 v15:
  - fix conflicts due to the new merged patches that rework power lib.
  - add Acked-by: Konstantin Ananyev for patch 3/3.
 v14:
  - use parse_uint to parse --cpu-resume-latency instead of adding a new
    parse_int()
 v13:
  - not allow negative value for --cpu-resume-latency.
  - restore to the original value as Konstantin suggested.
 v12:
  - add Acked-by Chengwen and Konstantin
  - fix overflow issue in l3fwd-power when parse command line
  - add a command parameter to set CPU resume latency
 v11:
  - operate on the cpu id the lcore is mapped to, using the new function
    power_get_lcore_mapped_cpu_id().
 v10:
  - replace LINE_MAX with a custom macro and fix two typos.
 v9:
  - move new feature description from release_24_07.rst to release_24_11.rst.
 v8:
  - update the latest code to resolve CI warning
 v7:
  - remove a dead code rte_lcore_is_enabled in patch[2/2]
 v6:
  - update release_24_07.rst based on dpdk repo to resolve CI warning.
 v5:
  - use LINE_MAX to replace BUFSIZ, and use snprintf to replace sprintf.
 v4:
  - fix some comments based on Stephen's review
  - add stdint.h include
  - add Acked-by Morten Brørup <mb@smartsharesystems.com>
 v3:
  - add RTE_POWER_xxx prefix for some macro in header
  - add the check for lcore_id with rte_lcore_is_enabled
 v2:
  - use PM QoS on CPU wide to replace the one on system wide

Huisong Li (3):
  power: introduce PM QoS API on CPU wide
  examples/l3fwd-power: fix data overflow when parse command line
  examples/l3fwd-power: add PM QoS configuration

 doc/guides/prog_guide/power_man.rst           |  19 +++
 doc/guides/rel_notes/release_24_11.rst        |   5 +
 .../sample_app_ug/l3_forward_power_man.rst    |   5 +-
 examples/l3fwd-power/main.c                   |  96 +++++++++++---
 lib/power/meson.build                         |   2 +
 lib/power/rte_power_qos.c                     | 123 ++++++++++++++++++
 lib/power/rte_power_qos.h                     |  73 +++++++++++
 lib/power/version.map                         |   4 +
 8 files changed, 306 insertions(+), 21 deletions(-)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

-- 
2.22.0


^ permalink raw reply	[relevance 4%]

* [PATCH v15 1/3] power: introduce PM QoS API on CPU wide
  2024-11-11  2:25  4% ` [PATCH v15 " Huisong Li
@ 2024-11-11  2:25  5%   ` Huisong Li
  2024-11-11 10:29  0%   ` [PATCH v15 0/3] power: introduce PM QoS interface Thomas Monjalon
  1 sibling, 0 replies; 169+ results
From: Huisong Li @ 2024-11-11  2:25 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay sensitive and request a low
resume time, like interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used to set and get the resume latency limit on the cpuX for
userspace. Each cpuidle governor in Linux selects which idle state to enter
based on this CPU resume latency in its idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection and limit it to entering just the shallowest idle state, to lower
the delay when waking up from an idle state, by setting a strict resume
latency (zero value).

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
 doc/guides/prog_guide/power_man.rst    |  19 ++++
 doc/guides/rel_notes/release_24_11.rst |   5 +
 lib/power/meson.build                  |   2 +
 lib/power/rte_power_qos.c              | 123 +++++++++++++++++++++++++
 lib/power/rte_power_qos.h              |  73 +++++++++++++++
 lib/power/version.map                  |   4 +
 6 files changed, 226 insertions(+)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
index 1ebab77ee9..ecae6b46ef 100644
--- a/doc/guides/prog_guide/power_man.rst
+++ b/doc/guides/prog_guide/power_man.rst
@@ -107,6 +107,25 @@ User Cases
 The power management mechanism is used to save power when performing L3 forwarding.
 
 
+PM QoS
+------
+
+The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
+interface is used to set and get the resume latency limit on the cpuX for
+userspace. Each cpuidle governor in Linux select which idle state to enter
+based on this CPU resume latency in their idle task.
+
+The deeper the idle state, the lower the power consumption, but the longer
+the resume time. Some service are latency sensitive and very except the low
+resume time, like interrupt packet receiving mode.
+
+Applications can set and get the CPU resume latency by the
+``rte_power_qos_set_cpu_resume_latency()`` and ``rte_power_qos_get_cpu_resume_latency()``
+respectively. Applications can set a strict resume latency (zero value) by
+the ``rte_power_qos_set_cpu_resume_latency()`` to low the resume latency and
+get better performance (instead, the power consumption of platform may increase).
+
+
 Ethernet PMD Power Management API
 ---------------------------------
 
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 543becba28..187e6823d7 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -276,6 +276,11 @@ New Features
   This field is used to pass an extra configuration settings such as ability
   to lookup IPv4 addresses in network byte order.
 
+* **Introduce per-CPU PM QoS interface.**
+
+  * Add per-CPU PM QoS interface to low the resume latency when wake up from
+    idle state.
+
 * **Added new API to register telemetry endpoint callbacks with private arguments.**
 
   A new ``rte_telemetry_register_cmd_arg`` function is available to pass an opaque value to
diff --git a/lib/power/meson.build b/lib/power/meson.build
index 4f4dc19687..313aaa6701 100644
--- a/lib/power/meson.build
+++ b/lib/power/meson.build
@@ -16,6 +16,7 @@ sources = files(
         'rte_power_cpufreq.c',
         'rte_power_uncore.c',
         'rte_power_pmd_mgmt.c',
+        'rte_power_qos.c',
 )
 headers = files(
         'power_cpufreq.h',
@@ -24,6 +25,7 @@ headers = files(
         'rte_power_guest_channel.h',
         'rte_power_pmd_mgmt.h',
         'rte_power_uncore.h',
+        'rte_power_qos.h',
 )
 
 deps += ['timer', 'ethdev']
diff --git a/lib/power/rte_power_qos.c b/lib/power/rte_power_qos.c
new file mode 100644
index 0000000000..4dd0532b36
--- /dev/null
+++ b/lib/power/rte_power_qos.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_lcore.h>
+#include <rte_log.h>
+
+#include "power_common.h"
+#include "rte_power_qos.h"
+
+#define PM_QOS_SYSFILE_RESUME_LATENCY_US	\
+	"/sys/devices/system/cpu/cpu%u/power/pm_qos_resume_latency_us"
+
+#define PM_QOS_CPU_RESUME_LATENCY_BUF_LEN	32
+
+int
+rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	if (latency < 0) {
+		POWER_LOG(ERR, "latency should be greater than and equal to 0");
+		return -EINVAL;
+	}
+
+	ret = open_core_sysfs_file(&f, "w", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us under
+	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning
+	 * is as follows for different input string.
+	 * 1> the resume latency is 0 if the input is "n/a".
+	 * 2> the resume latency is no constraint if the input is "0".
+	 * 3> the resume latency is the actual value to be set.
+	 */
+	if (latency == RTE_POWER_QOS_STRICT_LATENCY_VALUE)
+		snprintf(buf, sizeof(buf), "%s", "n/a");
+	else if (latency == RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+		snprintf(buf, sizeof(buf), "%u", 0);
+	else
+		snprintf(buf, sizeof(buf), "%u", latency);
+
+	ret = write_core_sysfs_s(f, buf);
+	if (ret != 0)
+		POWER_LOG(ERR, "Failed to write "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+
+	fclose(f);
+
+	return ret;
+}
+
+int
+rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	int latency = -1;
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	ret = open_core_sysfs_file(&f, "r", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	ret = read_core_sysfs_s(f, buf, sizeof(buf));
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to read "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		goto out;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us under
+	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning
+	 * is as follows for different output string.
+	 * 1> the resume latency is 0 if the output is "n/a".
+	 * 2> the resume latency is no constraint if the output is "0".
+	 * 3> the resume latency is the actual value in used for other string.
+	 */
+	if (strcmp(buf, "n/a") == 0)
+		latency = RTE_POWER_QOS_STRICT_LATENCY_VALUE;
+	else {
+		latency = strtoul(buf, NULL, 10);
+		latency = latency == 0 ? RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT : latency;
+	}
+
+out:
+	fclose(f);
+
+	return latency != -1 ? latency : ret;
+}
diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h
new file mode 100644
index 0000000000..7a8dab9272
--- /dev/null
+++ b/lib/power/rte_power_qos.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#ifndef RTE_POWER_QOS_H
+#define RTE_POWER_QOS_H
+
+#include <stdint.h>
+
+#include <rte_compat.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file rte_power_qos.h
+ *
+ * PM QoS API.
+ *
+ * The CPU-wide resume latency limit has a positive impact on this CPU's idle
+ * state selection in each cpuidle governor.
+ * Please see the PM QoS on CPU wide in the following link:
+ * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
+ *
+ * The deeper the idle state, the lower the power consumption, but the
+ * longer the resume time. Some service are delay sensitive and very except the
+ * low resume time, like interrupt packet receiving mode.
+ *
+ * In these case, per-CPU PM QoS API can be used to control this CPU's idle
+ * state selection and limit just enter the shallowest idle state to low the
+ * delay after sleep by setting strict resume latency (zero value).
+ */
+
+#define RTE_POWER_QOS_STRICT_LATENCY_VALUE		0
+#define RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT	INT32_MAX
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param lcore_id
+ *   target logical core id
+ *
+ * @param latency
+ *   The latency should be greater than and equal to zero in microseconds unit.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the current resume latency of this logical core.
+ * The default value in kernel is @see RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT
+ * if don't set it.
+ *
+ * @return
+ *   Negative value on failure.
+ *   >= 0 means the actual resume latency limit on this core.
+ */
+__rte_experimental
+int rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_POWER_QOS_H */
diff --git a/lib/power/version.map b/lib/power/version.map
index f442329bbc..920c8e79b3 100644
--- a/lib/power/version.map
+++ b/lib/power/version.map
@@ -51,6 +51,10 @@ EXPERIMENTAL {
 	rte_power_set_uncore_env;
 	rte_power_uncore_freqs;
 	rte_power_unset_uncore_env;
+
+	# added in 24.11
+	rte_power_qos_get_cpu_resume_latency;
+	rte_power_qos_set_cpu_resume_latency;
 };
 
 INTERNAL {
-- 
2.22.0


^ permalink raw reply	[relevance 5%]
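
A minimal usage sketch of the two calls added above (the lcore id handling is
an arbitrary example and error recovery is trimmed):

	#include <rte_power_qos.h>

	static int
	force_shallow_idle(uint16_t lcore_id)
	{
		/* remember the current limit so it can be restored later */
		int prev = rte_power_qos_get_cpu_resume_latency(lcore_id);

		if (prev < 0)
			return prev; /* lcore disabled or sysfs not accessible */

		/* strict (zero) resume latency: only the shallowest idle state */
		return rte_power_qos_set_cpu_resume_latency(lcore_id,
				RTE_POWER_QOS_STRICT_LATENCY_VALUE);
	}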

* RE: Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search when scheduling nodes
  2024-11-08 13:38  0%       ` David Marchand
@ 2024-11-11  5:38  0%         ` Jerin Jacob
  2024-11-12  8:51  0%           ` David Marchand
  0 siblings, 1 reply; 169+ results
From: Jerin Jacob @ 2024-11-11  5:38 UTC (permalink / raw)
  To: David Marchand
  Cc: Huichao Cai, Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram,
	yanzhirun_163, dev, Thomas Monjalon, Robin Jarry



> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Friday, November 8, 2024 7:08 PM
> To: Jerin Jacob <jerinj@marvell.com>
> Cc: Huichao Cai <chcchc88@163.com>; Kiran Kumar Kokkilagadda
> <kirankumark@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>; yanzhirun_163@163.com; dev@dpdk.org;
> Thomas Monjalon <thomas@monjalon.net>; Robin Jarry <rjarry@redhat.com>
> Subject: Re: Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search when
> scheduling nodes
> 
> Hello Jerin,

Hello David,

> 
> On Fri, Nov 8, 2024 at 1:22 PM Jerin Jacob <jerinj@marvell.com> wrote:
> > > > Isn't this breaking the ABI?
> > >
> > > So can't we modify the ABI, or is there any special operation
> > > required to modify the ABI?
> >
> > Only an LTS release (xx.11) can change the ABI, after sending a deprecation notice.
> > Looking at the pahole output, one option would be to make the dispatch area
> > and new semi-fastpath additions like xstat_off min-cache-aligned, to make
> > room for future expansion and to ensure better performance.
> 
> Adding holes may be a short term solution, but in my opinion, the slow path
> part should be entirely hidden and we only expose the fp part.

The new cache line alignment items proposed are fastpath items only.

> Reminder, those holes must be in a "known state" as we release v24.11 so that
> the presence of future additions can be safely detected.
> 
> 
> --
> David Marchand


^ permalink raw reply	[relevance 0%]

* RE: [EXTERNAL] [PATCH v2] graph: mcore: optimize graph search
  @ 2024-11-11  5:46  3%   ` Jerin Jacob
  2024-11-13  7:35  5%   ` [PATCH v3 1/2] " Huichao Cai
  1 sibling, 0 replies; 169+ results
From: Jerin Jacob @ 2024-11-11  5:46 UTC (permalink / raw)
  To: Huichao Cai, Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram,
	yanzhirun_163, david.marchand, Thomas Monjalon
  Cc: dev



> -----Original Message-----
> From: Huichao Cai <chcchc88@163.com>
> Sent: Monday, November 11, 2024 9:33 AM
> To: Jerin Jacob <jerinj@marvell.com>; Kiran Kumar Kokkilagadda
> <kirankumark@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>; yanzhirun_163@163.com
> Cc: dev@dpdk.org; Huichao cai <chcchc88@163.com>
> Subject: [EXTERNAL] [PATCH v2] graph: mcore: optimize graph search
> 
> From: Huichao cai <chcchc88@163.com>
> 
> In the function __rte_graph_mcore_dispatch_sched_node_enqueue,
> a slow loop is used to search for the graph. Modify the search logic to
> record the result of the first search, and use this record for subsequent
> searches to improve search speed.
> 
> Signed-off-by: Huichao cai <chcchc88@163.com>
> ---
>  lib/graph/rte_graph_model_mcore_dispatch.c | 11 +++++++----
>  lib/graph/rte_graph_worker_common.h        |  1 +
>  2 files changed, 8 insertions(+), 4 deletions(-)
> 
> diff --git a/lib/graph/rte_graph_model_mcore_dispatch.c
> b/lib/graph/rte_graph_model_mcore_dispatch.c
> index a590fc9..a81d338 100644
> --- a/lib/graph/rte_graph_model_mcore_dispatch.c
> +++ b/lib/graph/rte_graph_model_mcore_dispatch.c
> @@ -118,11 +118,14 @@
>  					      struct rte_graph_rq_head *rq)
>  {
>  	const unsigned int lcore_id = node->dispatch.lcore_id;
> -	struct rte_graph *graph;
> +	struct rte_graph *graph = node->dispatch.graph;
> 
> -	SLIST_FOREACH(graph, rq, next)
> -		if (graph->dispatch.lcore_id == lcore_id)
> -			break;
> +	if (unlikely((!graph) || (graph->dispatch.lcore_id != lcore_id))) {
> +		SLIST_FOREACH(graph, rq, next)
> +			if (graph->dispatch.lcore_id == lcore_id)
> +				break;
> +		node->dispatch.graph = graph;
> +	}
> 
>  	return graph != NULL ? __graph_sched_node_enqueue(node, graph) : false;
>  }
> diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
> index a518af2..4c2432b 100644
> --- a/lib/graph/rte_graph_worker_common.h
> +++ b/lib/graph/rte_graph_worker_common.h
> @@ -110,6 +110,7 @@ struct __rte_cache_aligned rte_node {
>  			unsigned int lcore_id;  /**< Node running lcore. */
>  			uint64_t total_sched_objs; /**< Number of objects scheduled. */
>  			uint64_t total_sched_fail; /**< Number of scheduled failure. */
> +			struct rte_graph *graph;  /**< Graph corresponding to lcore_id. */

We need to conclude the ABI-related discussion here before making this change:
 https://patches.dpdk.org/project/dpdk/patch/1730966682-2632-1-git-send-email-chcchc88@163.com/

>  		} dispatch;
>  	};
>  	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
> --
> 1.8.3.1


^ permalink raw reply	[relevance 3%]

* [RESEND PATCH v15 0/3] power: introduce PM QoS interface
                     ` (5 preceding siblings ...)
  2024-11-11  2:25  4% ` [PATCH v15 " Huisong Li
@ 2024-11-11  9:14  4% ` Huisong Li
  2024-11-11  9:14  5%   ` [RESEND PATCH v15 1/3] power: introduce PM QoS API on CPU wide Huisong Li
  6 siblings, 1 reply; 169+ results
From: Huisong Li @ 2024-11-11  9:14 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay sensitive and request a low
resume time, like interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used to set and get the resume latency limit on the cpuX for
userspace. Please see the description in kernel document[1].
Each cpuidle governor in Linux selects which idle state to enter based on
this CPU resume latency in its idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection and limit it to entering just the shallowest idle state, to lower
the delay when waking up from an idle state, by setting a strict resume
latency (zero value).

[1] https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us

---
 v15:
  - fix conflicts due to the new merged patches that rework power lib.
  - add Acked-by: Konstantin Ananyev for patch 3/3.
 v14:
  - use parse_uint to parse --cpu-resume-latency instead of adding a new
    parse_int()
 v13:
  - not allow negative value for --cpu-resume-latency.
  - restore to the original value as Konstantin suggested.
 v12:
  - add Acked-by Chengwen and Konstantin
  - fix overflow issue in l3fwd-power when parse command line
  - add a command parameter to set CPU resume latency
 v11:
  - operate on the cpu id the lcore is mapped to, using the new function
    power_get_lcore_mapped_cpu_id().
 v10:
  - replace LINE_MAX with a custom macro and fix two typos.
 v9:
  - move new feature description from release_24_07.rst to release_24_11.rst.
 v8:
  - update the latest code to resolve CI warning
 v7:
  - remove a dead code rte_lcore_is_enabled in patch[2/2]
 v6:
  - update release_24_07.rst based on dpdk repo to resolve CI warning.
 v5:
  - use LINE_MAX to replace BUFSIZ, and use snprintf to replace sprintf.
 v4:
  - fix some comments based on Stephen's review
  - add stdint.h include
  - add Acked-by Morten Brørup <mb@smartsharesystems.com>
 v3:
  - add RTE_POWER_xxx prefix for some macro in header
  - add the check for lcore_id with rte_lcore_is_enabled
 v2:
  - use PM QoS on CPU wide to replace the one on system wide

Huisong Li (3):
  power: introduce PM QoS API on CPU wide
  examples/l3fwd-power: fix data overflow when parse command line
  examples/l3fwd-power: add PM QoS configuration

 doc/guides/prog_guide/power_man.rst           |  19 +++
 doc/guides/rel_notes/release_24_11.rst        |   5 +
 .../sample_app_ug/l3_forward_power_man.rst    |   5 +-
 examples/l3fwd-power/main.c                   |  96 +++++++++++---
 lib/power/meson.build                         |   2 +
 lib/power/rte_power_qos.c                     | 123 ++++++++++++++++++
 lib/power/rte_power_qos.h                     |  73 +++++++++++
 lib/power/version.map                         |   4 +
 8 files changed, 306 insertions(+), 21 deletions(-)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

-- 
2.22.0


^ permalink raw reply	[relevance 4%]

* [RESEND PATCH v15 1/3] power: introduce PM QoS API on CPU wide
  2024-11-11  9:14  4% ` [RESEND PATCH " Huisong Li
@ 2024-11-11  9:14  5%   ` Huisong Li
  0 siblings, 0 replies; 169+ results
From: Huisong Li @ 2024-11-11  9:14 UTC (permalink / raw)
  To: dev
  Cc: mb, thomas, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong, lihuisong

The deeper the idle state, the lower the power consumption, but the longer
the resume time. Some services are delay sensitive and request a low
resume time, like interrupt packet receiving mode.

And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
interface is used to set and get the resume latency limit on the cpuX for
userspace. Each cpuidle governor in Linux selects which idle state to enter
based on this CPU resume latency in its idle task.

The per-CPU PM QoS API can be used to control this CPU's idle state
selection and limit it to entering just the shallowest idle state, to lower
the delay when waking up from an idle state, by setting a strict resume
latency (zero value).

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
 doc/guides/prog_guide/power_man.rst    |  19 ++++
 doc/guides/rel_notes/release_24_11.rst |   5 +
 lib/power/meson.build                  |   2 +
 lib/power/rte_power_qos.c              | 123 +++++++++++++++++++++++++
 lib/power/rte_power_qos.h              |  73 +++++++++++++++
 lib/power/version.map                  |   4 +
 6 files changed, 226 insertions(+)
 create mode 100644 lib/power/rte_power_qos.c
 create mode 100644 lib/power/rte_power_qos.h

diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
index 1ebab77ee9..ecae6b46ef 100644
--- a/doc/guides/prog_guide/power_man.rst
+++ b/doc/guides/prog_guide/power_man.rst
@@ -107,6 +107,25 @@ User Cases
 The power management mechanism is used to save power when performing L3 forwarding.
 
 
+PM QoS
+------
+
+The "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
+interface is used to set and get the resume latency limit on the cpuX for
+userspace. Each cpuidle governor in Linux select which idle state to enter
+based on this CPU resume latency in their idle task.
+
+The deeper the idle state, the lower the power consumption, but the longer
+the resume time. Some service are latency sensitive and very except the low
+resume time, like interrupt packet receiving mode.
+
+Applications can set and get the CPU resume latency by the
+``rte_power_qos_set_cpu_resume_latency()`` and ``rte_power_qos_get_cpu_resume_latency()``
+respectively. Applications can set a strict resume latency (zero value) by
+the ``rte_power_qos_set_cpu_resume_latency()`` to low the resume latency and
+get better performance (instead, the power consumption of platform may increase).
+
+
 Ethernet PMD Power Management API
 ---------------------------------
 
diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 543becba28..187e6823d7 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -276,6 +276,11 @@ New Features
   This field is used to pass an extra configuration settings such as ability
   to lookup IPv4 addresses in network byte order.
 
+* **Introduce per-CPU PM QoS interface.**
+
+  * Add per-CPU PM QoS interface to low the resume latency when wake up from
+    idle state.
+
 * **Added new API to register telemetry endpoint callbacks with private arguments.**
 
   A new ``rte_telemetry_register_cmd_arg`` function is available to pass an opaque value to
diff --git a/lib/power/meson.build b/lib/power/meson.build
index 4f4dc19687..313aaa6701 100644
--- a/lib/power/meson.build
+++ b/lib/power/meson.build
@@ -16,6 +16,7 @@ sources = files(
         'rte_power_cpufreq.c',
         'rte_power_uncore.c',
         'rte_power_pmd_mgmt.c',
+        'rte_power_qos.c',
 )
 headers = files(
         'power_cpufreq.h',
@@ -24,6 +25,7 @@ headers = files(
         'rte_power_guest_channel.h',
         'rte_power_pmd_mgmt.h',
         'rte_power_uncore.h',
+        'rte_power_qos.h',
 )
 
 deps += ['timer', 'ethdev']
diff --git a/lib/power/rte_power_qos.c b/lib/power/rte_power_qos.c
new file mode 100644
index 0000000000..4dd0532b36
--- /dev/null
+++ b/lib/power/rte_power_qos.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_lcore.h>
+#include <rte_log.h>
+
+#include "power_common.h"
+#include "rte_power_qos.h"
+
+#define PM_QOS_SYSFILE_RESUME_LATENCY_US	\
+	"/sys/devices/system/cpu/cpu%u/power/pm_qos_resume_latency_us"
+
+#define PM_QOS_CPU_RESUME_LATENCY_BUF_LEN	32
+
+int
+rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	if (latency < 0) {
+		POWER_LOG(ERR, "latency should be greater than and equal to 0");
+		return -EINVAL;
+	}
+
+	ret = open_core_sysfs_file(&f, "w", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us under
+	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning
+	 * is as follows for different input string.
+	 * 1> the resume latency is 0 if the input is "n/a".
+	 * 2> the resume latency is no constraint if the input is "0".
+	 * 3> the resume latency is the actual value to be set.
+	 */
+	if (latency == RTE_POWER_QOS_STRICT_LATENCY_VALUE)
+		snprintf(buf, sizeof(buf), "%s", "n/a");
+	else if (latency == RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+		snprintf(buf, sizeof(buf), "%u", 0);
+	else
+		snprintf(buf, sizeof(buf), "%u", latency);
+
+	ret = write_core_sysfs_s(f, buf);
+	if (ret != 0)
+		POWER_LOG(ERR, "Failed to write "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+
+	fclose(f);
+
+	return ret;
+}
+
+int
+rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id)
+{
+	char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN];
+	int latency = -1;
+	uint32_t cpu_id;
+	FILE *f;
+	int ret;
+
+	if (!rte_lcore_is_enabled(lcore_id)) {
+		POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id);
+		return -EINVAL;
+	}
+	ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id);
+	if (ret != 0)
+		return ret;
+
+	ret = open_core_sysfs_file(&f, "r", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id);
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		return ret;
+	}
+
+	ret = read_core_sysfs_s(f, buf, sizeof(buf));
+	if (ret != 0) {
+		POWER_LOG(ERR, "Failed to read "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s",
+			  cpu_id, strerror(errno));
+		goto out;
+	}
+
+	/*
+	 * Based on the sysfs interface pm_qos_resume_latency_us under
+	 * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning
+	 * is as follows for different output string.
+	 * 1> the resume latency is 0 if the output is "n/a".
+	 * 2> the resume latency is no constraint if the output is "0".
+	 * 3> the resume latency is the actual value in used for other string.
+	 */
+	if (strcmp(buf, "n/a") == 0)
+		latency = RTE_POWER_QOS_STRICT_LATENCY_VALUE;
+	else {
+		latency = strtoul(buf, NULL, 10);
+		latency = latency == 0 ? RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT : latency;
+	}
+
+out:
+	fclose(f);
+
+	return latency != -1 ? latency : ret;
+}
diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h
new file mode 100644
index 0000000000..7a8dab9272
--- /dev/null
+++ b/lib/power/rte_power_qos.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 HiSilicon Limited
+ */
+
+#ifndef RTE_POWER_QOS_H
+#define RTE_POWER_QOS_H
+
+#include <stdint.h>
+
+#include <rte_compat.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file rte_power_qos.h
+ *
+ * PM QoS API.
+ *
+ * The CPU-wide resume latency limit has a positive impact on this CPU's idle
+ * state selection in each cpuidle governor.
+ * Please see the PM QoS on CPU wide in the following link:
+ * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
+ *
+ * The deeper the idle state, the lower the power consumption, but the
+ * longer the resume time. Some service are delay sensitive and very except the
+ * low resume time, like interrupt packet receiving mode.
+ *
+ * In these case, per-CPU PM QoS API can be used to control this CPU's idle
+ * state selection and limit just enter the shallowest idle state to low the
+ * delay after sleep by setting strict resume latency (zero value).
+ */
+
+#define RTE_POWER_QOS_STRICT_LATENCY_VALUE		0
+#define RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT	INT32_MAX
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param lcore_id
+ *   target logical core id
+ *
+ * @param latency
+ *   The latency should be greater than and equal to zero in microseconds unit.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the current resume latency of this logical core.
+ * The default value in kernel is @see RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT
+ * if don't set it.
+ *
+ * @return
+ *   Negative value on failure.
+ *   >= 0 means the actual resume latency limit on this core.
+ */
+__rte_experimental
+int rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_POWER_QOS_H */
diff --git a/lib/power/version.map b/lib/power/version.map
index f442329bbc..920c8e79b3 100644
--- a/lib/power/version.map
+++ b/lib/power/version.map
@@ -51,6 +51,10 @@ EXPERIMENTAL {
 	rte_power_set_uncore_env;
 	rte_power_uncore_freqs;
 	rte_power_unset_uncore_env;
+
+	# added in 24.11
+	rte_power_qos_get_cpu_resume_latency;
+	rte_power_qos_set_cpu_resume_latency;
 };
 
 INTERNAL {
-- 
2.22.0


^ permalink raw reply	[relevance 5%]

* Re: [PATCH v15 0/3] power: introduce PM QoS interface
  2024-11-11  2:25  4% ` [PATCH v15 " Huisong Li
  2024-11-11  2:25  5%   ` [PATCH v15 1/3] power: introduce PM QoS API on CPU wide Huisong Li
@ 2024-11-11 10:29  0%   ` Thomas Monjalon
  1 sibling, 0 replies; 169+ results
From: Thomas Monjalon @ 2024-11-11 10:29 UTC (permalink / raw)
  To: Huisong Li
  Cc: dev, mb, ferruh.yigit, anatoly.burakov, david.hunt,
	sivaprasad.tummala, stephen, konstantin.ananyev, david.marchand,
	fengchengwen, liuyonglong

11/11/2024 03:25, Huisong Li:
> The deeper the idle state, the lower the power consumption, but the longer
> the resume time. Some services are delay sensitive and request a low
> resume time, like interrupt packet receiving mode.
> 
> And the "/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us" sysfs
> interface is used to set and get the resume latency limit on the cpuX for
> userspace. Please see the description in kernel document[1].
> Each cpuidle governor in Linux selects which idle state to enter based on
> this CPU resume latency in its idle task.
> 
> The per-CPU PM QoS API can be used to control this CPU's idle state
> selection and limit it to entering just the shallowest idle state, to lower
> the delay when waking up from an idle state, by setting a strict resume
> latency (zero value).
> 
> [1] https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us

Applied, thanks.




^ permalink raw reply	[relevance 0%]

* [PATCH] power: fix a typo in the PM QoS guide
@ 2024-11-11 12:52  5% Huisong Li
  2024-11-12  8:35  5% ` [PATCH v2] " Huisong Li
  2024-11-13  0:59  5% ` [PATCH v3] " Huisong Li
  0 siblings, 2 replies; 169+ results
From: Huisong Li @ 2024-11-11 12:52 UTC (permalink / raw)
  To: dev
  Cc: thomas, ferruh.yigit, david.hunt, sivaprasad.tummala,
	konstantin.ananyev, fengchengwen, liuyonglong, lihuisong

The typo in the guide makes it hard to understand, so fix it.

Fixes: dd6fd75bf662 ("power: introduce PM QoS API on CPU wide")

Signed-off-by: Huisong Li <lihuisong@huawei.com>
---
 doc/guides/prog_guide/power_man.rst | 2 +-
 lib/power/rte_power_qos.h           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
index 22e6e4fe1d..024670a9b4 100644
--- a/doc/guides/prog_guide/power_man.rst
+++ b/doc/guides/prog_guide/power_man.rst
@@ -118,7 +118,7 @@ based on this CPU resume latency in their idle task.
 
 The deeper the idle state, the lower the power consumption,
 but the longer the resume time.
-Some services are latency sensitive and very except the low resume time,
+Some services are latency sensitive and request the low resume time,
 like interrupt packet receiving mode.
 
 Applications can set and get the CPU resume latency with
diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h
index 7a8dab9272..a6d3677409 100644
--- a/lib/power/rte_power_qos.h
+++ b/lib/power/rte_power_qos.h
@@ -24,7 +24,7 @@ extern "C" {
  * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
  *
  * The deeper the idle state, the lower the power consumption, but the
- * longer the resume time. Some service are delay sensitive and very except the
+ * longer the resume time. Some service are delay sensitive and request the
  * low resume time, like interrupt packet receiving mode.
  *
  * In these case, per-CPU PM QoS API can be used to control this CPU's idle
-- 
2.22.0


^ permalink raw reply	[relevance 5%]

* [PATCH v2] power: fix a typo in the PM QoS guide
  2024-11-11 12:52  5% [PATCH] power: fix a typo in the PM QoS guide Huisong Li
@ 2024-11-12  8:35  5% ` Huisong Li
  2024-11-13  0:59  5% ` [PATCH v3] " Huisong Li
  1 sibling, 0 replies; 169+ results
From: Huisong Li @ 2024-11-12  8:35 UTC (permalink / raw)
  To: dev
  Cc: thomas, ferruh.yigit, david.hunt, sivaprasad.tummala,
	konstantin.ananyev, fengchengwen, liuyonglong, lihuisong

The typo in the guide makes it hard to understand, so fix it.

Fixes: dd6fd75bf662 ("power: introduce PM QoS API on CPU wide")

Signed-off-by: Huisong Li <lihuisong@huawei.com>
---
 doc/guides/prog_guide/power_man.rst | 2 +-
 lib/power/rte_power_qos.h           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
index 22e6e4fe1d..74039e5786 100644
--- a/doc/guides/prog_guide/power_man.rst
+++ b/doc/guides/prog_guide/power_man.rst
@@ -118,7 +118,7 @@ based on this CPU resume latency in their idle task.
 
 The deeper the idle state, the lower the power consumption,
 but the longer the resume time.
-Some services are latency sensitive and very except the low resume time,
+Some services are latency sensitive and request a low resume time,
 like interrupt packet receiving mode.
 
 Applications can set and get the CPU resume latency with
diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h
index 7a8dab9272..ce0c6eda15 100644
--- a/lib/power/rte_power_qos.h
+++ b/lib/power/rte_power_qos.h
@@ -24,7 +24,7 @@ extern "C" {
  * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
  *
  * The deeper the idle state, the lower the power consumption, but the
- * longer the resume time. Some service are delay sensitive and very except the
+ * longer the resume time. Some service are delay sensitive and request a
  * low resume time, like interrupt packet receiving mode.
  *
  * In these case, per-CPU PM QoS API can be used to control this CPU's idle
-- 
2.22.0


^ permalink raw reply	[relevance 5%]

* Re: Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search when scheduling nodes
  2024-11-11  5:38  0%         ` Jerin Jacob
@ 2024-11-12  8:51  0%           ` David Marchand
  2024-11-12  9:35  3%             ` Jerin Jacob
  0 siblings, 1 reply; 169+ results
From: David Marchand @ 2024-11-12  8:51 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Huichao Cai, Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram,
	yanzhirun_163, dev, Thomas Monjalon, Robin Jarry

On Mon, Nov 11, 2024 at 6:39 AM Jerin Jacob <jerinj@marvell.com> wrote:
>
>
>
> > -----Original Message-----
> > From: David Marchand <david.marchand@redhat.com>
> > Sent: Friday, November 8, 2024 7:08 PM
> > To: Jerin Jacob <jerinj@marvell.com>
> > Cc: Huichao Cai <chcchc88@163.com>; Kiran Kumar Kokkilagadda
> > <kirankumark@marvell.com>; Nithin Kumar Dabilpuram
> > <ndabilpuram@marvell.com>; yanzhirun_163@163.com; dev@dpdk.org;
> > Thomas Monjalon <thomas@monjalon.net>; Robin Jarry <rjarry@redhat.com>
> > Subject: Re: Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search when
> > scheduling nodes
> >
> > Hello Jerin,
>
> Hello David,
>
> >
> > On Fri, Nov 8, 2024 at 1:22 PM Jerin Jacob <jerinj@marvell.com> wrote:
> > > > > Isn't this breaking the ABI?
> > > >
> > > > So can't we modify the ABI, or is there any special operation
> > > > required to modify the ABI?
> > >
> > > Only an LTS release (xx.11) can change the ABI, after sending a deprecation notice.
> > > Looking at the pahole output, one option would be to make the dispatch area
> > > and new semi-fastpath additions like xstat_off min-cache-aligned, to make
> > > room for future expansion and to ensure better performance.
> >
> > Adding holes may be a short term solution, but in my opinion, the slow path
> > part should be entirely hidden and we only expose the fp part.
>
> The new cache line alignment items proposed are fastpath items only.

I had only noticed the second comment:

+       alignas(RTE_CACHE_LINE_MIN_SIZE)
        rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
        /* Fast path area  */
        ^^^^^^^^^^^^

And I assumed the part in the struct before was slow path.
(it may be worth enhancing these comments, with a single limit of
slow/fast path areas)


>
> > Reminder, those holes must be in a "known state" as we release v24.11 so that
> > the presence of future additions can be safely detected.

If the rte_node objects are allocated by the graph library and zero'd,
then we are good.
It seems to be the case in graph_nodes_populate(), and the rte_node
objects are embedded in the rte_graph object.

Is there another location in the graph library where a rte_node object
is allocated?

If not, and an application cannot create a rte_node object, your
proposal looks good to me.


-- 
David Marchand


^ permalink raw reply	[relevance 0%]

* RE: Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search when scheduling nodes
  2024-11-12  8:51  0%           ` David Marchand
@ 2024-11-12  9:35  3%             ` Jerin Jacob
  2024-11-12 12:57  0%               ` Huichao Cai
  0 siblings, 1 reply; 169+ results
From: Jerin Jacob @ 2024-11-12  9:35 UTC (permalink / raw)
  To: David Marchand, Huichao Cai
  Cc: Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram, yanzhirun_163,
	dev, Thomas Monjalon, Robin Jarry



> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Tuesday, November 12, 2024 2:21 PM
> To: Jerin Jacob <jerinj@marvell.com>
> Cc: Huichao Cai <chcchc88@163.com>; Kiran Kumar Kokkilagadda
> <kirankumark@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>; yanzhirun_163@163.com; dev@dpdk.org;
> Thomas Monjalon <thomas@monjalon.net>; Robin Jarry <rjarry@redhat.com>
> Subject: Re: Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search when
> scheduling nodes
> 
> On Mon, Nov 11, 2024 at 6:39 AM Jerin Jacob <jerinj@marvell.com> wrote:
> >
> >
> >
> > > -----Original Message-----
> > > From: David Marchand <david.marchand@redhat.com>
> > > Sent: Friday, November 8, 2024 7:08 PM
> > > To: Jerin Jacob <jerinj@marvell.com>
> > > Cc: Huichao Cai <chcchc88@163.com>; Kiran Kumar Kokkilagadda
> > > <kirankumark@marvell.com>; Nithin Kumar Dabilpuram
> > > <ndabilpuram@marvell.com>; yanzhirun_163@163.com; dev@dpdk.org;
> > > Thomas Monjalon <thomas@monjalon.net>; Robin Jarry
> > > <rjarry@redhat.com>
> > > Subject: Re: Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search
> > > when scheduling nodes
> > >
> > > Hello Jerin,
> >
> > Hello David,
> >
> > >
> > > On Fri, Nov 8, 2024 at 1:22 PM Jerin Jacob <jerinj@marvell.com> wrote:
> > > > > > Isn't this breaking the ABI?
> > > > >
> > > > > So can't we modify the ABI, or is there any special operation
> > > > > required to modify the ABI?
> > > >
> > > > Only an LTS release (xx.11) can change the ABI, after sending a
> > > > deprecation notice.
> > > > Looking at the pahole output, one option would be to make the dispatch
> > > > area and new semi-fastpath additions like xstat_off min-cache-aligned,
> > > > to make room for future expansion and to ensure better performance.
> > >
> > > Adding holes may be a short term solution, but in my opinion, the
> > > slow path part should be entirely hidden and we only expose the fp part.
> >
> > The new cache line alignment items proposed are fastpath items only.
> 
> I had only noticed the second comment:
> 
> +       alignas(RTE_CACHE_LINE_MIN_SIZE)
>         rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
>         /* Fast path area  */
>         ^^^^^^^^^^^^
> 
> And I assumed the part in the struct before was slow path.
> (it may be worth enhancing these comments, with a single limit of slow/fast
> path areas)

Yes. xstat_off was a new addition as a fastpath item in this release, and there
was no space in the original fastpath area. And yes, the comment needs to be
updated.


> 
> 
> >
> > > Reminder, those holes must be in a "known state" as we release
> > > v24.11 so that the presence of future additions can be safely detected.
> 
> If the rte_node objects are allocated by the graph library and zero'd, then we
> are good.
> It seems to be the case in graph_nodes_populate(), and the rte_node objects
> are embedded in the rte_graph object.
> 
> Is there another location in the graph library where a rte_node object is
> allocated?

No

> 
> If not, and an application can not create a rte_node object, your proposal looks
> good to me.

OK. @Huichao Cai Please send two patches as a series: (a) the new proposal and (b) your improvement.
Update the ABI Changes section in doc/guides/rel_notes/release_24_11.rst
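
For context, a minimal sketch of why the zero'd allocation makes future
additions detectable (hypothetical struct and field names, not the actual
rte_node layout):

    #include <stdlib.h>

    /* The reserved hole is in a "known state" (all zeroes) because the
     * library allocates node objects zero'd, as graph_nodes_populate()
     * effectively does. */
    struct node_abi {
        unsigned int lcore_id;
        unsigned char reserved[28]; /* hole kept zero'd by the allocator */
    };

    static struct node_abi *
    node_alloc(void)
    {
        return calloc(1, sizeof(struct node_abi));
    }

    /* A later release can claim bytes from 'reserved' for a new field;
     * reading 0 there then safely means "never set by the application". */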


> 
> 
> --
> David Marchand


^ permalink raw reply	[relevance 3%]

* Re:RE: Re:RE: [EXTERNAL] [PATCH] graph: optimize graph search when scheduling nodes
  2024-11-12  9:35  3%             ` Jerin Jacob
@ 2024-11-12 12:57  0%               ` Huichao Cai
  0 siblings, 0 replies; 169+ results
From: Huichao Cai @ 2024-11-12 12:57 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: David Marchand, Kiran Kumar Kokkilagadda,
	Nithin Kumar Dabilpuram, yanzhirun_163, dev, Thomas Monjalon,
	Robin Jarry

[-- Attachment #1: Type: text/plain, Size: 205 bytes --]

>OK. @Huichao Cai Please send two patches (a) new proposal and (b) your improvement as series.
>Update ABI Changes section in  doc/guides/rel_notes/release_24_11.rst

Ok. I will send these two patches soon.

[-- Attachment #2: Type: text/html, Size: 380 bytes --]

^ permalink raw reply	[relevance 0%]

* Re: [PATCH v15 4/4] eal: add PMU support to tracing library
  @ 2024-11-12 23:09  3%     ` Stephen Hemminger
  2024-11-15 10:24  0%       ` [EXTERNAL] " Tomasz Duszynski
  0 siblings, 1 reply; 169+ results
From: Stephen Hemminger @ 2024-11-12 23:09 UTC (permalink / raw)
  To: Tomasz Duszynski
  Cc: Jerin Jacob, Sunil Kumar Kori, Tyler Retzlaff, Ruifeng.Wang,
	bruce.richardson, david.marchand, dev, konstantin.v.ananyev,
	mattias.ronnblom, mb, thomas, zhoumin

On Fri, 25 Oct 2024 10:54:14 +0200
Tomasz Duszynski <tduszynski@marvell.com> wrote:

> In order to profile an app one needs to store a significant amount of
> samples somewhere for later analysis. Since the trace library supports
> storing data in CTF format, let's take advantage of that and add a
> dedicated PMU tracepoint.
> 
> Signed-off-by: Tomasz Duszynski <tduszynski@marvell.com>
> ---
>  app/test/test_trace_perf.c               | 10 ++++
>  doc/guides/prog_guide/profile_app.rst    |  5 ++
>  doc/guides/prog_guide/trace_lib.rst      | 32 +++++++++++
>  lib/eal/common/eal_common_trace.c        |  5 +-
>  lib/eal/common/eal_common_trace_pmu.c    | 38 ++++++++++++++
>  lib/eal/common/eal_common_trace_points.c |  5 ++
>  lib/eal/common/eal_trace.h               |  4 ++
>  lib/eal/common/meson.build               |  1 +
>  lib/eal/include/rte_eal_trace.h          | 11 ++++
>  lib/eal/version.map                      |  1 +
>  lib/pmu/rte_pmu.c                        | 67 +++++++++++++++++++++++-
>  lib/pmu/rte_pmu.h                        | 24 +++++++--
>  lib/pmu/version.map                      |  1 +
>  13 files changed, 198 insertions(+), 6 deletions(-)
>  create mode 100644 lib/eal/common/eal_common_trace_pmu.c


There is an issue with calling a rte_experimental function.

-------------------------------BEGIN LOGS----------------------------
####################################################################################
#### [Begin job log] "ubuntu-22.04-gcc-debug+doc+examples+tests" at step Build and test
####################################################################################
[3384/6468] Compiling C object buildtools/chkincs/chkincs.p/meson-generated_rte_pmu.c.o
FAILED: buildtools/chkincs/chkincs.p/meson-generated_rte_pmu.c.o 
ccache gcc -Ibuildtools/chkincs/chkincs.p -Ibuildtools/chkincs -I../buildtools/chkincs -Iexamples/l3fwd -I../examples/l3fwd -I../examples/common -Idrivers/bus/vdev -I../drivers/bus/vdev -I. -I.. -Iconfig -I../config -Ilib/eal/include -I../lib/eal/include -Ilib/eal/linux/include -I../lib/eal/linux/include -Ilib/eal/x86/include -I../lib/eal/x86/include -Ilib/eal/common -I../lib/eal/common -Ilib/eal -I../lib/eal -Ilib/kvargs -I../lib/kvargs -Ilib/log -I../lib/log -Ilib/metrics -I../lib/metrics -Ilib/telemetry -I../lib/telemetry -Ilib/pmu -I../lib/pmu -Idrivers/bus/pci -I../drivers/bus/pci -I../drivers/bus/pci/linux -Ilib/pci -I../lib/pci -Idrivers/bus/vmbus -I../drivers/bus/vmbus -I../drivers/bus/vmbus/linux -Ilib/argparse -I../lib/argparse -Ilib/ptr_compress -I../lib/ptr_compress -Ilib/ring -I../lib/ring -Ilib/rcu -I../lib/rcu -Ilib/mempool -I../lib/mempool -Ilib/mbuf -I../lib/mbuf -Ilib/net -I../lib/net -Ilib/meter -I../lib/meter -Ilib/ethdev -I../lib/ethdev -Ilib/cmdline -I../lib/cmdline -Ilib/hash -I../lib/hash -Ilib/timer -I../lib/timer -Ilib/acl -I../lib/acl -Ilib/bbdev -I../lib/bbdev -Ilib/bitratestats -I../lib/bitratestats -Ilib/bpf -I../lib/bpf -Ilib/cfgfile -I../lib/cfgfile -Ilib/compressdev -I../lib/compressdev -Ilib/cryptodev -I../lib/cryptodev -Ilib/distributor -I../lib/distributor -Ilib/dmadev -I../lib/dmadev -Ilib/efd -I../lib/efd -Ilib/eventdev -I../lib/eventdev -Ilib/dispatcher -I../lib/dispatcher -Ilib/gpudev -I../lib/gpudev -Ilib/gro -I../lib/gro -Ilib/gso -I../lib/gso -Ilib/ip_frag -I../lib/ip_frag -Ilib/jobstats -I../lib/jobstats -Ilib/latencystats -I../lib/latencystats -Ilib/lpm -I../lib/lpm -Ilib/member -I../lib/member -Ilib/pcapng -I../lib/pcapng -Ilib/power -I../lib/power -Ilib/rawdev -I../lib/rawdev -Ilib/regexdev -I../lib/regexdev -Ilib/mldev -I../lib/mldev -Ilib/rib -I../lib/rib -Ilib/reorder -I../lib/reorder -Ilib/sched -I../lib/sched -Ilib/security -I../lib/security -Ilib/stack -I../lib/stack -Ilib/vhost -I../lib/vhost -Ilib/ipsec -I../lib/ipsec -Ilib/pdcp -I../lib/pdcp -Ilib/fib -I../lib/fib -Ilib/port -I../lib/port -Ilib/pdump -I../lib/pdump -Ilib/table -I../lib/table -Ilib/pipeline -I../lib/pipeline -Ilib/graph -I../lib/graph -Ilib/node -I../lib/node -fdiagnostics-color=always -pipe -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch -Wextra -Werror -std=c11 -g -include rte_config.h -Wcast-qual -Wdeprecated -Wformat -Wformat-nonliteral -Wformat-security -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wold-style-definition -Wpointer-arith -Wsign-compare -Wstrict-prototypes -Wundef -Wwrite-strings -Wno-address-of-packed-member -Wno-packed-not-aligned -Wno-missing-field-initializers -D_GNU_SOURCE -march=corei7 -mrtm -MD -MQ buildtools/chkincs/chkincs.p/meson-generated_rte_pmu.c.o -MF buildtools/chkincs/chkincs.p/meson-generated_rte_pmu.c.o.d -o buildtools/chkincs/chkincs.p/meson-generated_rte_pmu.c.o -c buildtools/chkincs/chkincs.p/rte_pmu.c
In file included from buildtools/chkincs/chkincs.p/rte_pmu.c:1:
/home/runner/work/dpdk/dpdk/lib/pmu/rte_pmu.h: In function ‘rte_pmu_read’:
/home/runner/work/dpdk/dpdk/lib/pmu/rte_pmu.h:214:17: error: ‘__rte_pmu_enable_group’ is deprecated: Symbol is not yet part of stable ABI [-Werror=deprecated-declarations]
  214 |                 ret = __rte_pmu_enable_group(group);
      |                 ^~~
/home/runner/work/dpdk/dpdk/lib/pmu/rte_pmu.h:132:1: note: declared here
  132 | __rte_pmu_enable_group(struct rte_pmu_event_group *group);
      | ^~~~~~~~~~~~~~~~~~~~~~
/home/runner/work/dpdk/dpdk/lib/pmu/rte_pmu.h:222:9: error: ‘__rte_pmu_read_userpage’ is deprecated: Symbol is not yet part of stable ABI [-Werror=deprecated-declarations]
  222 |         return __rte_pmu_read_userpage(group->mmap_pages[index]);
      |         ^~~~~~
/home/runner/work/dpdk/dpdk/lib/pmu/rte_pmu.h:86:1: note: declared here
   86 | __rte_pmu_read_userpage(struct perf_event_mmap_page *pc)
      | ^~~~~~~~~~~~~~~~~~~~~~~
cc1: all warnings being treated as errors
[3385/6468] Compiling C object buildtools/chkincs/chkincs.p/meson-generated_rte_byteorder.c.o
[3386/6468] Compiling C object buildtools/chkincs/chkincs.p/meson-generated_rte_atomic.c.o
[3387/6468] Compiling C object buildtools/chkincs/chkincs.p/meson-generated_rte_rtm.c.o
[3388/6468] Compiling C object buildtools/chkincs/chkincs.p/meson-generated_rte_memcpy.c.o
[3389/6468] Compiling C object app/dpdk-test.p/test_test_memcpy_perf.c.o
ninja: build stopped: subcommand failed.
##[error]Process completed with exit code 1.
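
For reference, a minimal sketch of the mechanism behind this error (the
rte_pmu_example symbol below is hypothetical, not the actual fix): chkincs
compiles every public header without ALLOW_EXPERIMENTAL_API defined, so a
static inline function in a header cannot call an __rte_experimental symbol
unconditionally.

    #include <errno.h>
    #include <rte_compat.h>

    __rte_experimental
    int rte_pmu_example(void); /* hypothetical experimental symbol */

    static inline int
    rte_pmu_wrapper(void)
    {
    #ifdef ALLOW_EXPERIMENTAL_API
        return rte_pmu_example(); /* allowed only when the app opts in */
    #else
        return -ENOTSUP; /* keeps the header buildable for chkincs */
    #endif
    }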

^ permalink raw reply	[relevance 3%]

* [PATCH v3] power: fix a typo in the PM QoS guide
  2024-11-11 12:52  5% [PATCH] power: fix a typo in the PM QoS guide Huisong Li
  2024-11-12  8:35  5% ` [PATCH v2] " Huisong Li
@ 2024-11-13  0:59  5% ` Huisong Li
  1 sibling, 0 replies; 169+ results
From: Huisong Li @ 2024-11-13  0:59 UTC (permalink / raw)
  To: dev
  Cc: thomas, ferruh.yigit, david.hunt, sivaprasad.tummala,
	konstantin.ananyev, fengchengwen, liuyonglong, lihuisong

The typo in the guide makes the text hard to understand, so fix it.

Fixes: dd6fd75bf662 ("power: introduce PM QoS API on CPU wide")

Signed-off-by: Huisong Li <lihuisong@huawei.com>
---
 doc/guides/prog_guide/power_man.rst | 2 +-
 lib/power/rte_power_qos.h           | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
index 22e6e4fe1d..74039e5786 100644
--- a/doc/guides/prog_guide/power_man.rst
+++ b/doc/guides/prog_guide/power_man.rst
@@ -118,7 +118,7 @@ based on this CPU resume latency in their idle task.
 
 The deeper the idle state, the lower the power consumption,
 but the longer the resume time.
-Some services are latency sensitive and very except the low resume time,
+Some services are latency sensitive and request a low resume time,
 like interrupt packet receiving mode.
 
 Applications can set and get the CPU resume latency with
diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h
index 7a8dab9272..05a3f51ae2 100644
--- a/lib/power/rte_power_qos.h
+++ b/lib/power/rte_power_qos.h
@@ -24,8 +24,8 @@ extern "C" {
  * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us
  *
  * The deeper the idle state, the lower the power consumption, but the
- * longer the resume time. Some service are delay sensitive and very except the
- * low resume time, like interrupt packet receiving mode.
+ * longer the resume time. Some services are latency sensitive and request
+ * a low resume time, like interrupt packet receiving mode.
  *
  * In these case, per-CPU PM QoS API can be used to control this CPU's idle
  * state selection and limit just enter the shallowest idle state to low the
-- 
2.22.0


^ permalink raw reply	[relevance 5%]

* [PATCH v3 1/2] graph: mcore: optimize graph search
    2024-11-11  5:46  3%   ` [EXTERNAL] " Jerin Jacob
@ 2024-11-13  7:35  5%   ` Huichao Cai
  2024-11-13  7:35  5%     ` [PATCH v3 2/2] graph: add alignment to the member of rte_node Huichao Cai
  2024-11-14  8:45  5%     ` [PATCH v4 1/2] graph: mcore: optimize graph search Huichao Cai
  1 sibling, 2 replies; 169+ results
From: Huichao Cai @ 2024-11-13  7:35 UTC (permalink / raw)
  To: jerinj, kirankumark, ndabilpuram, yanzhirun_163; +Cc: dev

The function __rte_graph_mcore_dispatch_sched_node_enqueue uses
a slow loop to search for the graph. Modify the search logic to
record the result of the first search and reuse that record for
subsequent searches to improve search speed.

Due to the addition of a "graph" field in the "rte_node" structure,
update file release_24_11.rst.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 doc/guides/rel_notes/release_24_11.rst     |  1 +
 lib/graph/rte_graph_model_mcore_dispatch.c | 11 +++++++----
 lib/graph/rte_graph_worker_common.h        |  1 +
 3 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 9dc739c4cb..592116b979 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -423,6 +423,7 @@ ABI Changes
   added new structure ``rte_node_xstats`` to ``rte_node_register`` and
   added ``xstat_off`` to ``rte_node``.
 
+* graph: added ``graph`` field to the ``dispatch`` structure in the ``rte_node`` structure.
 
 Known Issues
 ------------
diff --git a/lib/graph/rte_graph_model_mcore_dispatch.c b/lib/graph/rte_graph_model_mcore_dispatch.c
index a590fc9497..a81d338227 100644
--- a/lib/graph/rte_graph_model_mcore_dispatch.c
+++ b/lib/graph/rte_graph_model_mcore_dispatch.c
@@ -118,11 +118,14 @@ __rte_graph_mcore_dispatch_sched_node_enqueue(struct rte_node *node,
 					      struct rte_graph_rq_head *rq)
 {
 	const unsigned int lcore_id = node->dispatch.lcore_id;
-	struct rte_graph *graph;
+	struct rte_graph *graph = node->dispatch.graph;
 
-	SLIST_FOREACH(graph, rq, next)
-		if (graph->dispatch.lcore_id == lcore_id)
-			break;
+	if (unlikely((!graph) || (graph->dispatch.lcore_id != lcore_id))) {
+		SLIST_FOREACH(graph, rq, next)
+			if (graph->dispatch.lcore_id == lcore_id)
+				break;
+		node->dispatch.graph = graph;
+	}
 
 	return graph != NULL ? __graph_sched_node_enqueue(node, graph) : false;
 }
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index a518af2b2a..4c2432b47f 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -110,6 +110,7 @@ struct __rte_cache_aligned rte_node {
 			unsigned int lcore_id;  /**< Node running lcore. */
 			uint64_t total_sched_objs; /**< Number of objects scheduled. */
 			uint64_t total_sched_fail; /**< Number of scheduled failure. */
+			struct rte_graph *graph;  /**< Graph corresponding to lcore_id. */
 		} dispatch;
 	};
 	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
-- 
2.27.0


^ permalink raw reply	[relevance 5%]

* [PATCH v3 2/2] graph: add alignment to the member of rte_node
  2024-11-13  7:35  5%   ` [PATCH v3 1/2] " Huichao Cai
@ 2024-11-13  7:35  5%     ` Huichao Cai
  2024-11-14  7:14  0%       ` [EXTERNAL] " Jerin Jacob
  2024-11-14  8:45  5%     ` [PATCH v4 1/2] graph: mcore: optimize graph search Huichao Cai
  1 sibling, 1 reply; 169+ results
From: Huichao Cai @ 2024-11-13  7:35 UTC (permalink / raw)
  To: jerinj, kirankumark, ndabilpuram, yanzhirun_163; +Cc: dev

The members "dispatch" and "xstat_off" of the structure "rte_node"
can be min cache aligned to make room for future expansion and to
make sure have better performance.

Due to the modification of the alignment of some members of the
"rte_node" structure, update file release_24_11.rst.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 doc/guides/rel_notes/release_24_11.rst | 3 +++
 lib/graph/rte_graph_worker_common.h    | 5 ++++-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 592116b979..6903b1d0f0 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -425,6 +425,9 @@ ABI Changes
 
 * graph: added ``graph`` field to the ``dispatch`` structure in the ``rte_node`` structure.
 
+* graph: The members ``dispatch`` and ``xstat_off`` of the structure ``rte_node`` have been
+  marked as RTE_CACHE_LINE_MIN_SIZE bytes aligned.
+
 Known Issues
 ------------
 
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index 4c2432b47f..9e99278a0a 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -104,6 +104,7 @@ struct __rte_cache_aligned rte_node {
 	/** Original process function when pcap is enabled. */
 	rte_node_process_t original_process;
 
+	alignas(RTE_CACHE_LINE_MIN_SIZE)
 	union {
 		/* Fast schedule area for mcore dispatch model */
 		struct {
@@ -113,8 +114,10 @@ struct __rte_cache_aligned rte_node {
 			struct rte_graph *graph;  /**< Graph corresponding to lcore_id. */
 		} dispatch;
 	};
-	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
+
 	/* Fast path area  */
+	alignas(RTE_CACHE_LINE_MIN_SIZE)
+	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
 	__extension__ struct __rte_cache_aligned {
 #define RTE_NODE_CTX_SZ 16
 		union {
-- 
2.27.0


^ permalink raw reply	[relevance 5%]

* RE: [EXTERNAL] [PATCH v3 2/2] graph: add alignment to the member of rte_node
  2024-11-13  7:35  5%     ` [PATCH v3 2/2] graph: add alignment to the member of rte_node Huichao Cai
@ 2024-11-14  7:14  0%       ` Jerin Jacob
  0 siblings, 0 replies; 169+ results
From: Jerin Jacob @ 2024-11-14  7:14 UTC (permalink / raw)
  To: Huichao Cai, Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram,
	yanzhirun_163
  Cc: dev



> -----Original Message-----
> From: Huichao Cai <chcchc88@163.com>
> Sent: Wednesday, November 13, 2024 1:06 PM
> To: Jerin Jacob <jerinj@marvell.com>; Kiran Kumar Kokkilagadda
> <kirankumark@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>; yanzhirun_163@163.com
> Cc: dev@dpdk.org
> Subject: [EXTERNAL] [PATCH v3 2/2] graph: add alignment to the member of
> rte_node
> 
> The members "dispatch" and "xstat_off" of the structure "rte_node" can be min
> cache aligned to make room for future expansion and to make sure have better
> performance. Due to the modification of the alignment of some members of the
> "rte_node"
> 
> The members "dispatch" and "xstat_off" of the structure "rte_node"
> can be min cache aligned to make room for future expansion and to make sure
> have better performance.
> 
> Due to the modification of the alignment of some members of the "rte_node"
> structure, update file release_24_11.rst.
> 
> Signed-off-by: Huichao Cai <chcchc88@163.com>
> ---
>  doc/guides/rel_notes/release_24_11.rst | 3 +++
>  lib/graph/rte_graph_worker_common.h    | 5 ++++-
>  2 files changed, 7 insertions(+), 1 deletion(-)
> 
> diff --git a/doc/guides/rel_notes/release_24_11.rst
> b/doc/guides/rel_notes/release_24_11.rst
> index 592116b979..6903b1d0f0 100644
> --- a/doc/guides/rel_notes/release_24_11.rst
> +++ b/doc/guides/rel_notes/release_24_11.rst
> @@ -425,6 +425,9 @@ ABI Changes
> 
>  * graph: added ``graph`` field to the ``dispatch`` structure in the ``rte_node``
> structure.
> 
> +* graph: The members ``dispatch`` and ``xstat_off`` of the structure
> +``rte_node`` have been
> +  marked as RTE_CACHE_LINE_MIN_SIZE bytes aligned.
> +
>  Known Issues
>  ------------
> 
> diff --git a/lib/graph/rte_graph_worker_common.h
> b/lib/graph/rte_graph_worker_common.h
> index 4c2432b47f..9e99278a0a 100644
> --- a/lib/graph/rte_graph_worker_common.h
> +++ b/lib/graph/rte_graph_worker_common.h
> @@ -104,6 +104,7 @@ struct __rte_cache_aligned rte_node {
>  	/** Original process function when pcap is enabled. */
>  	rte_node_process_t original_process;
> 
> +	alignas(RTE_CACHE_LINE_MIN_SIZE)
>  	union {
>  		/* Fast schedule area for mcore dispatch model */
>  		struct {
> @@ -113,8 +114,10 @@ struct __rte_cache_aligned rte_node {
>  			struct rte_graph *graph;  /**< Graph corresponding to
> lcore_id. */
>  		} dispatch;
>  	};
> -	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
> +
>  	/* Fast path area  */

Make it as two separate comment, Fast path area cache line 1 and Fastpath area cache line 2.

> +	alignas(RTE_CACHE_LINE_MIN_SIZE)
> +	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
>  	__extension__ struct __rte_cache_aligned {  #define RTE_NODE_CTX_SZ
> 16
>  		union {
> --
> 2.27.0


^ permalink raw reply	[relevance 0%]

* [PATCH v4 1/2] graph: mcore: optimize graph search
  2024-11-13  7:35  5%   ` [PATCH v3 1/2] " Huichao Cai
  2024-11-13  7:35  5%     ` [PATCH v3 2/2] graph: add alignment to the member of rte_node Huichao Cai
@ 2024-11-14  8:45  5%     ` Huichao Cai
  2024-11-14  8:45  5%       ` [PATCH v4 2/2] graph: add alignment to the member of rte_node Huichao Cai
  2024-12-13  2:21 10%       ` [PATCH v5] graph: mcore: optimize graph search Huichao Cai
  1 sibling, 2 replies; 169+ results
From: Huichao Cai @ 2024-11-14  8:45 UTC (permalink / raw)
  To: jerinj, kirankumark, ndabilpuram, yanzhirun_163; +Cc: dev

The function __rte_graph_mcore_dispatch_sched_node_enqueue uses
a slow loop to search for the graph. Modify the search logic to
record the result of the first search and reuse that record for
subsequent searches to improve search speed.

Due to the addition of a "graph" field in the "rte_node" structure,
update file release_24_11.rst.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 doc/guides/rel_notes/release_24_11.rst     |  1 +
 lib/graph/rte_graph_model_mcore_dispatch.c | 11 +++++++----
 lib/graph/rte_graph_worker_common.h        |  1 +
 3 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 9dc739c4cb..592116b979 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -423,6 +423,7 @@ ABI Changes
   added new structure ``rte_node_xstats`` to ``rte_node_register`` and
   added ``xstat_off`` to ``rte_node``.
 
+* graph: added ``graph`` field to the ``dispatch`` structure in the ``rte_node`` structure.
 
 Known Issues
 ------------
diff --git a/lib/graph/rte_graph_model_mcore_dispatch.c b/lib/graph/rte_graph_model_mcore_dispatch.c
index a590fc9497..a81d338227 100644
--- a/lib/graph/rte_graph_model_mcore_dispatch.c
+++ b/lib/graph/rte_graph_model_mcore_dispatch.c
@@ -118,11 +118,14 @@ __rte_graph_mcore_dispatch_sched_node_enqueue(struct rte_node *node,
 					      struct rte_graph_rq_head *rq)
 {
 	const unsigned int lcore_id = node->dispatch.lcore_id;
-	struct rte_graph *graph;
+	struct rte_graph *graph = node->dispatch.graph;
 
-	SLIST_FOREACH(graph, rq, next)
-		if (graph->dispatch.lcore_id == lcore_id)
-			break;
+	if (unlikely((!graph) || (graph->dispatch.lcore_id != lcore_id))) {
+		SLIST_FOREACH(graph, rq, next)
+			if (graph->dispatch.lcore_id == lcore_id)
+				break;
+		node->dispatch.graph = graph;
+	}
 
 	return graph != NULL ? __graph_sched_node_enqueue(node, graph) : false;
 }
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index a518af2b2a..4c2432b47f 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -110,6 +110,7 @@ struct __rte_cache_aligned rte_node {
 			unsigned int lcore_id;  /**< Node running lcore. */
 			uint64_t total_sched_objs; /**< Number of objects scheduled. */
 			uint64_t total_sched_fail; /**< Number of scheduled failure. */
+			struct rte_graph *graph;  /**< Graph corresponding to lcore_id. */
 		} dispatch;
 	};
 	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
-- 
2.27.0


^ permalink raw reply	[relevance 5%]

* [PATCH v4 2/2] graph: add alignment to the member of rte_node
  2024-11-14  8:45  5%     ` [PATCH v4 1/2] graph: mcore: optimize graph search Huichao Cai
@ 2024-11-14  8:45  5%       ` Huichao Cai
  2024-11-14 10:05  0%         ` [EXTERNAL] " Jerin Jacob
  2024-11-15  1:55  5%         ` [PATCH v5 1/1] graph: improve node layout Huichao Cai
  2024-12-13  2:21 10%       ` [PATCH v5] graph: mcore: optimize graph search Huichao Cai
  1 sibling, 2 replies; 169+ results
From: Huichao Cai @ 2024-11-14  8:45 UTC (permalink / raw)
  To: jerinj, kirankumark, ndabilpuram, yanzhirun_163; +Cc: dev

The members dispatch and xstat_off of the structure rte_node
can be min cache aligned to make room for future expansion and to
ensure better performance. Add corresponding comments.

Due to the modification of the alignment of some members of the
rte_node structure, update file release_24_11.rst.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 doc/guides/rel_notes/release_24_11.rst | 3 +++
 lib/graph/rte_graph_worker_common.h    | 7 ++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 592116b979..6903b1d0f0 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -425,6 +425,9 @@ ABI Changes
 
 * graph: added ``graph`` field to the ``dispatch`` structure in the ``rte_node`` structure.
 
+* graph: The members ``dispatch`` and ``xstat_off`` of the structure ``rte_node`` have been
+  marked as RTE_CACHE_LINE_MIN_SIZE bytes aligned.
+
 Known Issues
 ------------
 
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index 4c2432b47f..d36abec08b 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -104,16 +104,21 @@ struct __rte_cache_aligned rte_node {
 	/** Original process function when pcap is enabled. */
 	rte_node_process_t original_process;
 
+	/** Fast path area cache line 1. */
 	union {
 		/* Fast schedule area for mcore dispatch model */
-		struct {
+		alignas(RTE_CACHE_LINE_MIN_SIZE) struct {
 			unsigned int lcore_id;  /**< Node running lcore. */
 			uint64_t total_sched_objs; /**< Number of objects scheduled. */
 			uint64_t total_sched_fail; /**< Number of scheduled failure. */
 			struct rte_graph *graph;  /**< Graph corresponding to lcore_id. */
 		} dispatch;
 	};
+
+	/** Fast path area cache line 2. */
+	alignas(RTE_CACHE_LINE_MIN_SIZE)
 	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
+
 	/* Fast path area  */
 	__extension__ struct __rte_cache_aligned {
 #define RTE_NODE_CTX_SZ 16
-- 
2.27.0


^ permalink raw reply	[relevance 5%]

* RE: [EXTERNAL] [PATCH v4 2/2] graph: add alignment to the member of rte_node
  2024-11-14  8:45  5%       ` [PATCH v4 2/2] graph: add alignment to the member of rte_node Huichao Cai
@ 2024-11-14 10:05  0%         ` Jerin Jacob
  2024-11-15  1:55  5%         ` [PATCH v5 1/1] graph: improve node layout Huichao Cai
  1 sibling, 0 replies; 169+ results
From: Jerin Jacob @ 2024-11-14 10:05 UTC (permalink / raw)
  To: Huichao Cai, Kiran Kumar Kokkilagadda, Nithin Kumar Dabilpuram,
	yanzhirun_163, david.marchand
  Cc: dev



> -----Original Message-----
> From: Huichao Cai <chcchc88@163.com>
> Sent: Thursday, November 14, 2024 2:15 PM
> To: Jerin Jacob <jerinj@marvell.com>; Kiran Kumar Kokkilagadda
> <kirankumark@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>; yanzhirun_163@163.com
> Cc: dev@dpdk.org
> Subject: [EXTERNAL] [PATCH v4 2/2] graph: add alignment to the member of
> rte_node
> 
> The members dispatch and xstat_off of the structure rte_node can be min cache
> aligned to make room for future expansion and to make sure have better
> performance. Add corresponding comments.
> 

Please change subject to graph: improve node layout


> Due to the modification of the alignment of some members of the rte_node
> structure, update file release_24_11.rst.

The above section is not needed.


> 
> Signed-off-by: Huichao Cai <chcchc88@163.com>
> ---
>  doc/guides/rel_notes/release_24_11.rst | 3 +++
>  lib/graph/rte_graph_worker_common.h    | 7 ++++++-
>  2 files changed, 9 insertions(+), 1 deletion(-)
> 
> diff --git a/doc/guides/rel_notes/release_24_11.rst
> b/doc/guides/rel_notes/release_24_11.rst
> index 592116b979..6903b1d0f0 100644
> --- a/doc/guides/rel_notes/release_24_11.rst
> +++ b/doc/guides/rel_notes/release_24_11.rst
> @@ -425,6 +425,9 @@ ABI Changes
> 
>  * graph: added ``graph`` field to the ``dispatch`` structure in the ``rte_node``
> structure.
> 
> +* graph: The members ``dispatch`` and ``xstat_off`` of the structure
> +``rte_node`` have been
> +  marked as RTE_CACHE_LINE_MIN_SIZE bytes aligned.
> +
>  Known Issues
>  ------------
> 
> diff --git a/lib/graph/rte_graph_worker_common.h
> b/lib/graph/rte_graph_worker_common.h
> index 4c2432b47f..d36abec08b 100644
> --- a/lib/graph/rte_graph_worker_common.h
> +++ b/lib/graph/rte_graph_worker_common.h
> @@ -104,16 +104,21 @@ struct __rte_cache_aligned rte_node {
>  	/** Original process function when pcap is enabled. */
>  	rte_node_process_t original_process;
> 
> +	/** Fast path area cache line 1. */


Fast schedule area for mcore dispatch model

>  	union {
>  		/* Fast schedule area for mcore dispatch model */

The comment above can be removed.

> -		struct {
> +		alignas(RTE_CACHE_LINE_MIN_SIZE) struct {
>  			unsigned int lcore_id;  /**< Node running lcore. */
>  			uint64_t total_sched_objs; /**< Number of objects
> scheduled. */
>  			uint64_t total_sched_fail; /**< Number of scheduled
> failure. */
>  			struct rte_graph *graph;  /**< Graph corresponding to
> lcore_id. */
>  		} dispatch;
>  	};
> +
> +	/** Fast path area cache line 2. */
> +	alignas(RTE_CACHE_LINE_MIN_SIZE)
>  	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
> +
>  	/* Fast path area  */


Fast path area cache line 1

>  	__extension__ struct __rte_cache_aligned {  #define RTE_NODE_CTX_SZ
> 16

With above: Acked-by: Jerin Jacob <jerinj@marvell.com>

Looks like we cannot merge a new feature in rc3. I would suggest skipping 1/2 and sending only this patch, so that 1/2 can be merged in the next release.

Please add @david.marchand@redhat.com in Cc.


^ permalink raw reply	[relevance 0%]

* [PATCH v5 1/1] graph: improve node layout
  2024-11-14  8:45  5%       ` [PATCH v4 2/2] graph: add alignment to the member of rte_node Huichao Cai
  2024-11-14 10:05  0%         ` [EXTERNAL] " Jerin Jacob
@ 2024-11-15  1:55  5%         ` Huichao Cai
  2024-11-15 14:23  0%           ` Thomas Monjalon
  1 sibling, 1 reply; 169+ results
From: Huichao Cai @ 2024-11-15  1:55 UTC (permalink / raw)
  To: jerinj, kirankumark, ndabilpuram, yanzhirun_163; +Cc: dev

The members "dispatch" and "xstat_off" of the structure "rte_node"
can be min cache aligned to make room for future expansion and to
make sure have better performance. Add corresponding comments.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 doc/guides/rel_notes/release_24_11.rst |  2 ++
 lib/graph/rte_graph_worker_common.h    | 10 +++++++---
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 5063badf39..32800e8cb0 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -491,6 +491,8 @@ ABI Changes
   added new structure ``rte_node_xstats`` to ``rte_node_register`` and
   added ``xstat_off`` to ``rte_node``.
 
+* graph: The members ``dispatch`` and ``xstat_off`` of the structure ``rte_node`` have been
+  marked as RTE_CACHE_LINE_MIN_SIZE bytes aligned.
 
 Known Issues
 ------------
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index a518af2b2a..d3ec88519d 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -104,16 +104,20 @@ struct __rte_cache_aligned rte_node {
 	/** Original process function when pcap is enabled. */
 	rte_node_process_t original_process;
 
+	/** Fast schedule area for mcore dispatch model. */
 	union {
-		/* Fast schedule area for mcore dispatch model */
-		struct {
+		alignas(RTE_CACHE_LINE_MIN_SIZE) struct {
 			unsigned int lcore_id;  /**< Node running lcore. */
 			uint64_t total_sched_objs; /**< Number of objects scheduled. */
 			uint64_t total_sched_fail; /**< Number of scheduled failure. */
 		} dispatch;
 	};
+
+	/** Fast path area cache line 1. */
+	alignas(RTE_CACHE_LINE_MIN_SIZE)
 	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
-	/* Fast path area  */
+
+	/** Fast path area cache line 2. */
 	__extension__ struct __rte_cache_aligned {
 #define RTE_NODE_CTX_SZ 16
 		union {
-- 
2.27.0


^ permalink raw reply	[relevance 5%]

* RE: [EXTERNAL] Re: [PATCH v15 4/4] eal: add PMU support to tracing library
  2024-11-12 23:09  3%     ` Stephen Hemminger
@ 2024-11-15 10:24  0%       ` Tomasz Duszynski
  0 siblings, 0 replies; 169+ results
From: Tomasz Duszynski @ 2024-11-15 10:24 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: Jerin Jacob, Sunil Kumar Kori, Tyler Retzlaff, Ruifeng.Wang,
	bruce.richardson, david.marchand, dev, konstantin.v.ananyev,
	mattias.ronnblom, mb, thomas, zhoumin

>-----Original Message-----
>From: Stephen Hemminger <stephen@networkplumber.org>
>Sent: Wednesday, November 13, 2024 12:10 AM
>To: Tomasz Duszynski <tduszynski@marvell.com>
>Cc: Jerin Jacob <jerinj@marvell.com>; Sunil Kumar Kori <skori@marvell.com>; Tyler Retzlaff
><roretzla@linux.microsoft.com>; Ruifeng.Wang@arm.com; bruce.richardson@intel.com;
>david.marchand@redhat.com; dev@dpdk.org; konstantin.v.ananyev@yandex.ru;
>mattias.ronnblom@ericsson.com; mb@smartsharesystems.com; thomas@monjalon.net; zhoumin@loongson.cn
>Subject: [EXTERNAL] Re: [PATCH v15 4/4] eal: add PMU support to tracing library
>
>On Fri, 25 Oct 2024 10:54:14 +0200
>Tomasz Duszynski <tduszynski@marvell.com> wrote:
>
>> In order to profile an app one needs to store a significant amount of
>> samples somewhere for later analysis. Since the trace library supports
>> storing data in CTF format, let's take advantage of that and add a
>> dedicated PMU tracepoint.
>>
>> Signed-off-by: Tomasz Duszynski <tduszynski@marvell.com>
>> ---
>>  app/test/test_trace_perf.c               | 10 ++++
>>  doc/guides/prog_guide/profile_app.rst    |  5 ++
>>  doc/guides/prog_guide/trace_lib.rst      | 32 +++++++++++
>>  lib/eal/common/eal_common_trace.c        |  5 +-
>>  lib/eal/common/eal_common_trace_pmu.c    | 38 ++++++++++++++
>>  lib/eal/common/eal_common_trace_points.c |  5 ++
>>  lib/eal/common/eal_trace.h               |  4 ++
>>  lib/eal/common/meson.build               |  1 +
>>  lib/eal/include/rte_eal_trace.h          | 11 ++++
>>  lib/eal/version.map                      |  1 +
>>  lib/pmu/rte_pmu.c                        | 67 +++++++++++++++++++++++-
>>  lib/pmu/rte_pmu.h                        | 24 +++++++--
>>  lib/pmu/version.map                      |  1 +
>>  13 files changed, 198 insertions(+), 6 deletions(-)  create mode
>> 100644 lib/eal/common/eal_common_trace_pmu.c
>
>
>There is an issue with calling a rte_experimental function.
>
>-------------------------------BEGIN LOGS----------------------------
>####################################################################################
>#### [Begin job log] "ubuntu-22.04-gcc-debug+doc+examples+tests" at step Build and test
>####################################################################################
>[3384/6468] Compiling C object buildtools/chkincs/chkincs.p/meson-generated_rte_pmu.c.o
>FAILED: buildtools/chkincs/chkincs.p/meson-generated_rte_pmu.c.o
>ccache gcc -Ibuildtools/chkincs/chkincs.p -Ibuildtools/chkincs -I../buildtools/chkincs -
>Iexamples/l3fwd -I../examples/l3fwd -I../examples/common -Idrivers/bus/vdev -I../drivers/bus/vdev -
>I. -I.. -Iconfig -I../config -Ilib/eal/include -I../lib/eal/include -Ilib/eal/linux/include -
>I../lib/eal/linux/include -Ilib/eal/x86/include -I../lib/eal/x86/include -Ilib/eal/common -
>I../lib/eal/common -Ilib/eal -I../lib/eal -Ilib/kvargs -I../lib/kvargs -Ilib/log -I../lib/log -
>Ilib/metrics -I../lib/metrics -Ilib/telemetry -I../lib/telemetry -Ilib/pmu -I../lib/pmu -
>Idrivers/bus/pci -I../drivers/bus/pci -I../drivers/bus/pci/linux -Ilib/pci -I../lib/pci -
>Idrivers/bus/vmbus -I../drivers/bus/vmbus -I../drivers/bus/vmbus/linux -Ilib/argparse -
>I../lib/argparse -Ilib/ptr_compress -I../lib/ptr_compress -Ilib/ring -I../lib/ring -Ilib/rcu -
>I../lib/rcu -Ilib/mempool -I../lib/mempool -Ilib/mbuf -I../lib/mbuf -Ilib/net -I../lib/net -
>Ilib/meter -I../lib/meter -Ilib/ethdev -I../lib/ethdev -Ilib/cmdline -I../lib/cmdline -Ilib/hash -
>I../lib/hash -Ilib/timer -I../lib/timer -Ilib/acl -I../lib/acl -Ilib/bbdev -I../lib/bbdev -
>Ilib/bitratestats -I../lib/bitratestats -Ilib/bpf -I../lib/bpf -Ilib/cfgfile -I../lib/cfgfile -
>Ilib/compressdev -I../lib/compressdev -Ilib/cryptodev -I../lib/cryptodev -Ilib/distributor -
>I../lib/distributor -Ilib/dmadev -I../lib/dmadev -Ilib/efd -I../lib/efd -Ilib/eventdev -
>I../lib/eventdev -Ilib/dispatcher -I../lib/dispatcher -Ilib/gpudev -I../lib/gpudev -Ilib/gro -
>I../lib/gro -Ilib/gso -I../lib/gso -Ilib/ip_frag -I../lib/ip_frag -Ilib/jobstats -I../lib/jobstats
>-Ilib/latencystats -I../lib/latencystats -Ilib/lpm -I../lib/lpm -Ilib/member -I../lib/member -
>Ilib/pcapng -I../lib/pcapng -Ilib/power -I../lib/power -Ilib/rawdev -I../lib/rawdev -Ilib/regexdev
>-I../lib/regexdev -Ilib/mldev -I../lib/mldev -Ilib/rib -I../lib/rib -Ilib/reorder -I../lib/reorder
>-Ilib/sched -I../lib/sched -Ilib/security -I../lib/security -Ilib/stack -I../lib/stack -Ilib/vhost
>-I../lib/vhost -Ilib/ipsec -I../lib/ipsec -Ilib/pdcp -I../lib/pdcp -Ilib/fib -I../lib/fib -
>Ilib/port -I../lib/port -Ilib/pdump -I../lib/pdump -Ilib/table -I../lib/table -Ilib/pipeline -
>I../lib/pipeline -Ilib/graph -I../lib/graph -Ilib/node -I../lib/node -fdiagnostics-color=always -
>pipe -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch -Wextra -Werror -std=c11 -g -include rte_config.h -
>Wcast-qual -Wdeprecated -Wformat -Wformat-nonliteral -Wformat-security -Wmissing-declarations -
>Wmissing-prototypes -Wnested-externs -Wold-style-definition -Wpointer-arith -Wsign-compare -
>Wstrict-prototypes -Wundef -Wwrite-strings -Wno-address-of-packed-member -Wno-packed-not-aligned -
>Wno-missing-field-initializers -D_GNU_SOURCE -march=corei7 -mrtm -MD -MQ
>buildtools/chkincs/chkincs.p/meson-generated_rte_pmu.c.o -MF buildtools/chkincs/chkincs.p/meson-
>generated_rte_pmu.c.o.d -o buildtools/chkincs/chkincs.p/meson-generated_rte_pmu.c.o -c
>buildtools/chkincs/chkincs.p/rte_pmu.c
>In file included from buildtools/chkincs/chkincs.p/rte_pmu.c:1:
>/home/runner/work/dpdk/dpdk/lib/pmu/rte_pmu.h: In function ‘rte_pmu_read’:
>/home/runner/work/dpdk/dpdk/lib/pmu/rte_pmu.h:214:17: error: ‘__rte_pmu_enable_group’ is
>deprecated: Symbol is not yet part of stable ABI [-Werror=deprecated-declarations]
>  214 |                 ret = __rte_pmu_enable_group(group);
>      |                 ^~~
>/home/runner/work/dpdk/dpdk/lib/pmu/rte_pmu.h:132:1: note: declared here
>  132 | __rte_pmu_enable_group(struct rte_pmu_event_group *group);
>      | ^~~~~~~~~~~~~~~~~~~~~~
>/home/runner/work/dpdk/dpdk/lib/pmu/rte_pmu.h:222:9: error: ‘__rte_pmu_read_userpage’ is
>deprecated: Symbol is not yet part of stable ABI [-Werror=deprecated-declarations]
>  222 |         return __rte_pmu_read_userpage(group->mmap_pages[index]);
>      |         ^~~~~~
>/home/runner/work/dpdk/dpdk/lib/pmu/rte_pmu.h:86:1: note: declared here
>   86 | __rte_pmu_read_userpage(struct perf_event_mmap_page *pc)
>      | ^~~~~~~~~~~~~~~~~~~~~~~
>cc1: all warnings being treated as errors [3385/6468] Compiling C object
>buildtools/chkincs/chkincs.p/meson-generated_rte_byteorder.c.o
>[3386/6468] Compiling C object buildtools/chkincs/chkincs.p/meson-generated_rte_atomic.c.o
>[3387/6468] Compiling C object buildtools/chkincs/chkincs.p/meson-generated_rte_rtm.c.o
>[3388/6468] Compiling C object buildtools/chkincs/chkincs.p/meson-generated_rte_memcpy.c.o
>[3389/6468] Compiling C object app/dpdk-test.p/test_test_memcpy_perf.c.o
>ninja: build stopped: subcommand failed.
>##[error]Process completed with exit code 1.

Right, this indeed pops up with -Dcheck_includes=true. Will fix this in v16. 

Thanks.

^ permalink raw reply	[relevance 0%]

* RE: rte_fib network order bug
  @ 2024-11-15 13:52  3%               ` Morten Brørup
  2024-11-15 14:28  3%                 ` Robin Jarry
  0 siblings, 1 reply; 169+ results
From: Morten Brørup @ 2024-11-15 13:52 UTC (permalink / raw)
  To: Robin Jarry, Medvedkin, Vladimir, dev

> From: Robin Jarry [mailto:rjarry@redhat.com]
> Sent: Friday, 15 November 2024 14.02
> 
> Morten Brørup, Nov 14, 2024 at 15:35:
> >> RTE_IPV4 is only useful to define addresses in unit tests.
> >
> > There are plenty of special IP addresses and subnets, where a
> shortcut
> > macro makes the address easier readable in the code.
> 
> OK, let me reformulate. I didn't mean to say that RTE_IPV4 is useless.
> But it will always generate addresses in *host order*. Which means they
> cannot be used in IPv4 headers without passing them through htonl().
> This is weird in my opinion.

Robin, you've totally won me over on this endian discussion. :-)
Especially the IPv6 comparison makes it clear why IPv4 should also be network byte order.

API/ABI stability is a pain... we're stuck with host endian IPv4 addresses; e.g. for the RTE_IPV4() macro, which I now agree produces the wrong endian value (on little endian CPUs).
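
As an illustration (a minimal sketch; RTE_IPV4() and rte_cpu_to_be_32() are
the existing DPDK macros, the helper function is made up):

    #include <rte_ip.h>
    #include <rte_byteorder.h>

    static void
    set_dst(struct rte_ipv4_hdr *hdr)
    {
        uint32_t ip = RTE_IPV4(192, 168, 0, 1); /* host order */
        hdr->dst_addr = rte_cpu_to_be_32(ip);   /* swap for the wire */
    }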

> 
> >> Why would control plane use a different representation of addresses
> >> compared to data plane?
> >
> > Excellent question.
> > Old habit? Growing up using big endian CPUs, we have come to think of
> > IPv4 addresses as 32 bit numbers, so we keep treating them as such.
> > With this old way of thinking, the only reason to use network endian
> > in the fast path with little endian CPUs is for performance reasons
> > (to save the byte swap) - if not, we would still prefer using host
> > endian in the fast path too.
> 
> I understand the implementation reasons why you would prefer working
> with host order integers. But the APIs that deal with IPv4 addresses
> should not reflect implementation details.

They were probably designed based on the same way of thinking I was used to (until you convinced me I was wrong).

> 
> >> Also for consistency with IPv6, I really think
> >> that *all* addresses should be dealt in their network form.
> >
> > Food for thought!
> 
> Vladimir, could we at least consider adding a real network order mode
> for the rib and fib libraries? So that we can have consistent APIs
> between IPv4 and IPv6?

And/or rename RTE_FIB_F_NETWORK_ORDER to RTE_FIB_F_NETWORK_ORDER_LOOKUP or similar. This is important if real network order mode is added (now or later)!

> 
> On that same topic, I wonder if it would make sense to change the API
> parameters to use an opaque rte_ipv4_addr_t type instead of a native
> uint32_t to avoid any confusion.

It could be considered an IPv4 address type (like the IPv6 address type) (which should be in network endian), which it is not, so I don't like this idea.
What the API really should offer is a choice (or a union) of uint32_t and rte_be32_t, but that's not possible, so also using uint32_t for big endian values seems like a viable compromise.
Another alternative, using void* for the IPv4 address array, seems overkill to me, since compilers don't warn about mixing uint32_t with rte_be32_t values (like mixing signed and unsigned emits warnings).

> 
> Thanks!


^ permalink raw reply	[relevance 3%]

* Re: [PATCH v5 1/1] graph: improve node layout
  2024-11-15  1:55  5%         ` [PATCH v5 1/1] graph: improve node layout Huichao Cai
@ 2024-11-15 14:23  0%           ` Thomas Monjalon
  2024-11-15 15:57  0%             ` [EXTERNAL] " Jerin Jacob
  0 siblings, 1 reply; 169+ results
From: Thomas Monjalon @ 2024-11-15 14:23 UTC (permalink / raw)
  To: jerinj, ndabilpuram; +Cc: kirankumark, yanzhirun_163, dev, Huichao Cai

Is it good to go?


15/11/2024 02:55, Huichao Cai:
> The members "dispatch" and "xstat_off" of the structure "rte_node"
> can be min cache aligned to make room for future expansion and to
> make sure have better performance. Add corresponding comments.
> 
> Signed-off-by: Huichao Cai <chcchc88@163.com>
> ---
>  doc/guides/rel_notes/release_24_11.rst |  2 ++
>  lib/graph/rte_graph_worker_common.h    | 10 +++++++---
>  2 files changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
> index 5063badf39..32800e8cb0 100644
> --- a/doc/guides/rel_notes/release_24_11.rst
> +++ b/doc/guides/rel_notes/release_24_11.rst
> @@ -491,6 +491,8 @@ ABI Changes
>    added new structure ``rte_node_xstats`` to ``rte_node_register`` and
>    added ``xstat_off`` to ``rte_node``.
>  
> +* graph: The members ``dispatch`` and ``xstat_off`` of the structure ``rte_node`` have been
> +  marked as RTE_CACHE_LINE_MIN_SIZE bytes aligned.
>  
>  Known Issues
>  ------------
> diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
> index a518af2b2a..d3ec88519d 100644
> --- a/lib/graph/rte_graph_worker_common.h
> +++ b/lib/graph/rte_graph_worker_common.h
> @@ -104,16 +104,20 @@ struct __rte_cache_aligned rte_node {
>  	/** Original process function when pcap is enabled. */
>  	rte_node_process_t original_process;
>  
> +	/** Fast schedule area for mcore dispatch model. */
>  	union {
> -		/* Fast schedule area for mcore dispatch model */
> -		struct {
> +		alignas(RTE_CACHE_LINE_MIN_SIZE) struct {
>  			unsigned int lcore_id;  /**< Node running lcore. */
>  			uint64_t total_sched_objs; /**< Number of objects scheduled. */
>  			uint64_t total_sched_fail; /**< Number of scheduled failure. */
>  		} dispatch;
>  	};
> +
> +	/** Fast path area cache line 1. */
> +	alignas(RTE_CACHE_LINE_MIN_SIZE)
>  	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
> -	/* Fast path area  */
> +
> +	/** Fast path area cache line 2. */
>  	__extension__ struct __rte_cache_aligned {
>  #define RTE_NODE_CTX_SZ 16
>  		union {
> 






^ permalink raw reply	[relevance 0%]

* Re: rte_fib network order bug
  2024-11-15 13:52  3%               ` Morten Brørup
@ 2024-11-15 14:28  3%                 ` Robin Jarry
  2024-11-15 16:20  0%                   ` Stephen Hemminger
  0 siblings, 1 reply; 169+ results
From: Robin Jarry @ 2024-11-15 14:28 UTC (permalink / raw)
  To: Morten Brørup, Medvedkin, Vladimir, dev

Morten Brørup, Nov 15, 2024 at 14:52:
> Robin, you've totally won me over on this endian discussion. :-)
> Especially the IPv6 comparison make it clear why IPv4 should also be 
> network byte order.
>
> API/ABI stability is a pain... we're stuck with host endian IPv4 
> addresses; e.g. for the RTE_IPV4() macro, which I now agree produces 
> the wrong endian value (on little endian CPUs).

At least for 24.11 it is too late. But maybe we could make it right for 
the next LTS?

>> Vladimir, could we at least consider adding a real network order mode 
>> for the rib and fib libraries? So that we can have consistent APIs 
>> between IPv4 and IPv6?
>
> And/or rename RTE_FIB_F_NETWORK_ORDER to 
> RTE_FIB_F_NETWORK_ORDER_LOOKUP or similar. This is important if real 
> network order mode is added (now or later)!

Maybe we could revert that patch and defer a complete change of the 
rib/fib APIs to only expose network order addresses? It would be an ABI 
breakage but if properly announced in advance, it should be possible.

Thinking about it some more. Having a flag for such a drastic change in 
behaviour does not seem right.

>> On that same topic, I wonder if it would make sense to change the API 
>> parameters to use an opaque rte_ipv4_addr_t type instead of a native 
>> uint32_t to avoid any confusion.
>
> It could be considered an IPv4 address type (like the IPv6 address 
> type) (which should be in network endian), which it is not, so I don't 
> like this idea.
>
> What the API really should offer is a choice (or a union) of uint32_t 
> and rte_be32_t, but that's not possible, so also using uint32_t for 
> big endian values seems like a viable compromise.
>
> Another alternative, using void* for the IPv4 address array, seems 
> overkill to me, since compilers don't warn about mixing uint32_t with 
> rte_be32_t values (like mixing signed and unsigned emits warnings).

If what I proposed above is possible, then all these APIs could be using 
rte_be32_t values (or even better, an rte_ipv4_addr_t alias for 
consistency with IPv6). That would make everything much simpler.

Thoughts?


^ permalink raw reply	[relevance 3%]

* RE: [EXTERNAL] Re: [PATCH v5 1/1] graph: improve node layout
  2024-11-15 14:23  0%           ` Thomas Monjalon
@ 2024-11-15 15:57  0%             ` Jerin Jacob
  0 siblings, 0 replies; 169+ results
From: Jerin Jacob @ 2024-11-15 15:57 UTC (permalink / raw)
  To: Thomas Monjalon, Nithin Kumar Dabilpuram
  Cc: Kiran Kumar Kokkilagadda, yanzhirun_163, dev, Huichao Cai



> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Friday, November 15, 2024 7:54 PM
> To: Jerin Jacob <jerinj@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>
> Cc: Kiran Kumar Kokkilagadda <kirankumark@marvell.com>;
> yanzhirun_163@163.com; dev@dpdk.org; Huichao Cai <chcchc88@163.com>
> Subject: [EXTERNAL] Re: [PATCH v5 1/1] graph: improve node layout
> 
> Is it good to go?
> 
> 
> 15/11/2024 02:55, Huichao Cai:
> > The members "dispatch" and "xstat_off" of the structure "rte_node"
> > can be min cache aligned to make room for future expansion and to make
> > sure have better performance. Add corresponding comments.
> >
> > Signed-off-by: Huichao Cai <chcchc88@163.com>]


Acked-by: Jerin Jacob <jerinj@marvell.com>


> > ---
> >  doc/guides/rel_notes/release_24_11.rst |  2 ++
> >  lib/graph/rte_graph_worker_common.h    | 10 +++++++---
> >  2 files changed, 9 insertions(+), 3 deletions(-)
> >
> > diff --git a/doc/guides/rel_notes/release_24_11.rst
> > b/doc/guides/rel_notes/release_24_11.rst
> > index 5063badf39..32800e8cb0 100644
> > --- a/doc/guides/rel_notes/release_24_11.rst
> > +++ b/doc/guides/rel_notes/release_24_11.rst
> > @@ -491,6 +491,8 @@ ABI Changes
> >    added new structure ``rte_node_xstats`` to ``rte_node_register`` and
> >    added ``xstat_off`` to ``rte_node``.
> >
> > +* graph: The members ``dispatch`` and ``xstat_off`` of the structure
> > +``rte_node`` have been
> > +  marked as RTE_CACHE_LINE_MIN_SIZE bytes aligned.
> >
> >  Known Issues
> >  ------------
> > diff --git a/lib/graph/rte_graph_worker_common.h
> > b/lib/graph/rte_graph_worker_common.h
> > index a518af2b2a..d3ec88519d 100644
> > --- a/lib/graph/rte_graph_worker_common.h
> > +++ b/lib/graph/rte_graph_worker_common.h
> > @@ -104,16 +104,20 @@ struct __rte_cache_aligned rte_node {
> >  	/** Original process function when pcap is enabled. */
> >  	rte_node_process_t original_process;
> >
> > +	/** Fast schedule area for mcore dispatch model. */
> >  	union {
> > -		/* Fast schedule area for mcore dispatch model */
> > -		struct {
> > +		alignas(RTE_CACHE_LINE_MIN_SIZE) struct {
> >  			unsigned int lcore_id;  /**< Node running lcore. */
> >  			uint64_t total_sched_objs; /**< Number of objects
> scheduled. */
> >  			uint64_t total_sched_fail; /**< Number of scheduled
> failure. */
> >  		} dispatch;
> >  	};
> > +
> > +	/** Fast path area cache line 1. */
> > +	alignas(RTE_CACHE_LINE_MIN_SIZE)
> >  	rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
> > -	/* Fast path area  */
> > +
> > +	/** Fast path area cache line 2. */
> >  	__extension__ struct __rte_cache_aligned {  #define RTE_NODE_CTX_SZ
> > 16
> >  		union {
> >
> 
> 
> 
> 


^ permalink raw reply	[relevance 0%]

* Re: rte_fib network order bug
  2024-11-15 14:28  3%                 ` Robin Jarry
@ 2024-11-15 16:20  0%                   ` Stephen Hemminger
  2024-11-17 15:04  3%                     ` Vladimir Medvedkin
  0 siblings, 1 reply; 169+ results
From: Stephen Hemminger @ 2024-11-15 16:20 UTC (permalink / raw)
  To: Robin Jarry; +Cc: Morten Brørup, Medvedkin, Vladimir, dev

On Fri, 15 Nov 2024 15:28:33 +0100
"Robin Jarry" <rjarry@redhat.com> wrote:

> Morten Brørup, Nov 15, 2024 at 14:52:
> > Robin, you've totally won me over on this endian discussion. :-)
> > Especially the IPv6 comparison make it clear why IPv4 should also be 
> > network byte order.
> >
> > API/ABI stability is a pain... we're stuck with host endian IPv4 
> > addresses; e.g. for the RTE_IPV4() macro, which I now agree produces 
> > the wrong endian value (on little endian CPUs).  
> 
> At least for 24.11 it is too late. But maybe we could make it right for 
> the next LTS?
> 
> >> Vladimir, could we at least consider adding a real network order mode 
> >> for the rib and fib libraries? So that we can have consistent APIs 
> >> between IPv4 and IPv6?  
> >
> > And/or rename RTE_FIB_F_NETWORK_ORDER to 
> > RTE_FIB_F_NETWORK_ORDER_LOOKUP or similar. This is important if real 
> > network order mode is added (now or later)!  
> 
> Maybe we could revert that patch and defer a complete change of the 
> rib/fib APIs to only expose network order addresses? It would be an ABI 
> breakage but if properly announced in advance, it should be possible.
> 
> Thinking about it some more. Having a flag for such a drastic change in 
> behaviour does not seem right.

It was a mistake for DPDK to define its own data structures for IP addresses.
Would have been much better to stick with the legacy what BSD, Linux (and Windows)
uses in API. 'struct in_addr' and 'struct in6_addr'

Reinvention did not help users.

^ permalink raw reply	[relevance 0%]

* Re: rte_fib network order bug
  2024-11-15 16:20  0%                   ` Stephen Hemminger
@ 2024-11-17 15:04  3%                     ` Vladimir Medvedkin
  0 siblings, 0 replies; 169+ results
From: Vladimir Medvedkin @ 2024-11-17 15:04 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: Robin Jarry, Morten Brørup, Medvedkin, Vladimir, dev

[-- Attachment #1: Type: text/plain, Size: 4983 bytes --]

Hi all,

[Robin] > I had not understood that it was *only* the lookups that were
network order
[Morten] >When I saw the byte order flag the first time, it was not clear
to me either that it only affected lookups - I too thought it covered the
entire API of the library. This needs to be emphasized in the description
of the flag. And the flag's name should contain LOOKUP
[Morten] > And/or rename RTE_FIB_F_NETWORK_ORDER to
RTE_FIB_F_NETWORK_ORDER_LOOKUP or similar.

There is a clear comment on this flag saying that it affects lookups.
Repeating the statement with an exclamation mark seems too much. Moreover,
at first this flag was named "RTE_FIB_FLAG_LOOKUP_BE" and it was suggested
for renaming here:
https://inbox.dpdk.org/dev/D4SWPKOPRD5Z.87YIET3Y4AW@redhat.com/

[Morten] >Control plane API should use CPU byte order ... adding it
(support for network byte order) to the RIB library would be nice too.
I'm not sure if I understood you correctly here; RIB is a control plane
library.

[Robin] > an IPv4 address is *not* an integer. It should be treated as an
opaque value.
I don't agree here. IPv4 is 32 bits of information. CPUs usually can treat
32 bits of information as an integer, which is really useful.

[Morten] > Treating IPv4 addresses as byte arrays would allow simple
memcmp() for range comparison
How is it possible for a general case? For example, I need to test IP
addresses against range 1.1.1.7 - 10.20.30.37.
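
For illustration, with host byte order such a general range test is a plain
integer comparison (a minimal sketch using the existing RTE_IPV4 macro; the
helper names are made up):

    #include <rte_ip.h>
    #include <rte_byteorder.h>

    /* Host-order input: numeric comparison works directly. */
    static int
    in_range(uint32_t ip_host)
    {
        return ip_host >= RTE_IPV4(1, 1, 1, 7) &&
               ip_host <= RTE_IPV4(10, 20, 30, 37);
    }

    /* Network-order input needs one byteswap to restore numeric order. */
    static int
    in_range_be(rte_be32_t ip_be)
    {
        return in_range(rte_be_to_cpu_32(ip_be));
    }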

[Robin] >Also for consistency with IPv6, I really think that *all*
addresses should be dealt in their network form.
There is no such problem as a byte order mismatch for IPv6 addresses, since
they cannot be treated by modern CPUs as a native integer type.

[Robin] >But it (RTE_IPV4) will always generate addresses in *host order*.
Which means they cannot be used in IPv4 headers without passing them
through htonl().
RTE_IPV4 is not limited by setting IPv4 headers values.

[Robin] >Maybe we could revert that patch and defer a complete change of
the rib/fib APIs to only expose network order addresses?
I don't agree with that. Don't limit yourself to just manipulating network
headers.

[Robin] >Thinking about it some more. Having a flag for such a drastic
change in behaviour does not seem right.
This flag is optional. I don't see any problems with that.

In general, here we just have different perspectives on the problem. I can
see and understand your point.
My considerations are:
- The vast majority of longest prefix match algorithms work with
addresses in host byte order (binary trees, multibit tries, DXR; the only
exception is hash-based lookup)
only hash based lookup)
- If you run byteswap two or more times, you are probably doing something
wrong in terms of computations

So, feel free to submit patches adding this feature to the control plane
API, but let's consider:
- default behaviour should remain the same. Why? At least because for my
use cases I'd like to have "data representation" (byte swap) outside of the
library. Not to mention ABI/API breakage
- IPv4 should stay as uint32_t. C doesn't know such a thing as byte order;
it knows about size and signedness. rte_be32_t is just a hint for us -
humans :)
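
A two-line sketch of that last point (rte_be32_t is a plain typedef of
uint32_t in rte_byteorder.h, so the compiler cannot catch order bugs):

    #include <rte_byteorder.h>

    uint32_t host = 0x01020304;
    rte_be32_t wire = host; /* compiles silently despite the order bug */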


пт, 15 нояб. 2024 г. в 17:00, Stephen Hemminger <stephen@networkplumber.org
>:

> On Fri, 15 Nov 2024 15:28:33 +0100
> "Robin Jarry" <rjarry@redhat.com> wrote:
>
> > Morten Brørup, Nov 15, 2024 at 14:52:
> > > Robin, you've totally won me over on this endian discussion. :-)
> > > Especially the IPv6 comparison make it clear why IPv4 should also be
> > > network byte order.
> > >
> > > API/ABI stability is a pain... we're stuck with host endian IPv4
> > > addresses; e.g. for the RTE_IPV4() macro, which I now agree produces
> > > the wrong endian value (on little endian CPUs).
> >
> > At least for 24.11 it is too late. But maybe we could make it right for
> > the next LTS?
> >
> > >> Vladimir, could we at least consider adding a real network order mode
> > >> for the rib and fib libraries? So that we can have consistent APIs
> > >> between IPv4 and IPv6?
> > >
> > > And/or rename RTE_FIB_F_NETWORK_ORDER to
> > > RTE_FIB_F_NETWORK_ORDER_LOOKUP or similar. This is important if real
> > > network order mode is added (now or later)!
> >
> > Maybe we could revert that patch and defer a complete change of the
> > rib/fib APIs to only expose network order addresses? It would be an ABI
> > breakage but if properly announced in advance, it should be possible.
> >
> > Thinking about it some more. Having a flag for such a drastic change in
> > behaviour does not seem right.
>
> It was a mistake for DPDK to define its own data structures for IP
> addresses.
> Would have been much better to stick with the legacy what BSD, Linux (and
> Windows)
> uses in API. 'struct in_addr' and 'struct in6_addr'
>
> Reinvention did not help users.
>


-- 
Regards,
Vladimir

[-- Attachment #2: Type: text/html, Size: 11432 bytes --]

^ permalink raw reply	[relevance 3%]

* Tech Board Meeting Minutes - 2024-Nov-13
@ 2024-11-20 22:24  3% Honnappa Nagarahalli
  0 siblings, 0 replies; 169+ results
From: Honnappa Nagarahalli @ 2024-11-20 22:24 UTC (permalink / raw)
  To: techboard, dev; +Cc: nd


Members Attending
-------------------------
Aaron Conole
Bruce Richardson
Hemant Agrawal
Jerin Jacob
Kevin Traynor
Konstantin Ananyev
Maxime Coquelin
Morten Brørup
Stephen Hemminger
Thomas Monjalon

NOTE: The technical board meetings are on every second Wednesday at 3pm
UTC.  Meetings are public, and DPDK community members are welcome to
attend.  Agenda and minutes can be found at http://core.dpdk.org/techboard/minutes

Next meeting will be on Wednesday 2024-Nov-27 @ 3pm UTC, and will be chaired by Hemant Agrawal

Agenda Items
============
1) A DPDK summit in Prague was approved. The tentative dates are May 7th to 8th or 21st to 22nd, 2025.
 
2) Did we write down all ideas from the Montreal brainstorm (public + techboard)?
Honnappa has summarized and sent this to Techboard.
Action Item: Honnappa to add the list to the slides in Google Docs (https://docs.google.com/presentation/d/1TDmz1_xvWFWxrMtXgKA03e_4yPUUTWdJ7aWV9_BBUhE/edit?usp=sharing)
 
3) Did we summarize ideas coming from email and Slack?
Stephen has summarized these.
Action Item: Stephen to add these to the slides mentioned above. Create an Excel sheet and send it to Techboard for voting.
 
4) Next Steps
   a) Need sizing for these challenges, and mentors.
   b) Govboard has approved $10,000 for the coding challenge prize. The coding challenge could happen in conjunction with that.
   c) Stephen, Ben and possibly Nathan would lead this effort. Once the mentors are decided, they will manage the individual coding challenges. The prize money will be decided once the priorities are decided. Tentatively: publish 10 challenges and select the 3 best.
   d) Can we send a teaser in advance?
 
5) PVS Studio has posted a blog on issues found in static analysis, worth a look - https://pvs-studio.com/en/blog/posts/cpp/1183/
 
6) Status of 24.11 release – RC2 released. Lots of features introduced. so_ring will be merged after this release. The lcore variable feature from Mattias is an interesting one. One concern is the amount of memory it uses: the memory does not come from hugepages, and it allocates 128KB per lcore. This is merged but not yet marked as Experimental; marking it Experimental will allow us to make changes even if they break the ABI.
Action Item: Patrick to check if it is possible to add some test cases to the CI pipeline to warn about changes to memory usage caused by a patch.
Action Item: Thomas to mark this feature as Experimental.
 
7) The documented process for merging is to have at least 2 reviews. However, the reality seems different: things get merged before the release. Thomas and David review the patches during the release process if there were too few reviews. We could have a list of patches that need review at some location (dpdk.org?).
 

^ permalink raw reply	[relevance 3%]

* [PATCH v1 0/4] Adjust wording for NUMA vs. socket ID in DPDK
@ 2024-11-26 13:14  3% Anatoly Burakov
  0 siblings, 0 replies; 169+ results
From: Anatoly Burakov @ 2024-11-26 13:14 UTC (permalink / raw)
  To: dev

While DPDK initially used the term "socket ID" to refer to the physical package
ID, the last time DPDK read "physical_package_id" for the socket ID was ~9 years
ago, so it has been a while since we actually switched over to using the term
"socket" to mean "NUMA node".

This wasn't a problem before, as most systems had one NUMA node per physical
socket. However, in the last few years, more and more systems have multiple NUMA
nodes per physical CPU socket. Since DPDK used NUMA nodes already, the
transition was pretty seamless; however, we are now faced with a situation where
most of our documentation still uses outdated terms, and our API is rife with
references to "sockets" when in actuality we mean "NUMA nodes". This can be a
source of confusion.

While completely renaming all of our APIs would be a huge effort, would take a
long time, and arguably wouldn't even be worth the API breakages (given that this
mismatch between terminology and reality is implicitly understood by most people
working on DPDK, and so this isn't much of a problem in practice), we can do
some tweaks around the edges and at least document this unfortunate reality.

This patchset suggests the following changes:

- Update rte_socket/rte_lcore documentation to refer to NUMA nodes rather than
sockets
- Rename internal structures' fields to better reflect this intention
- Rename --socket-mem/--socket-limit flags to refer to NUMA rather than sockets

The documentation is updated to refer to the new EAL flags, but is otherwise left
untouched; instead, the entry in the "glossary" is amended to indicate that when
DPDK documentation refers to "sockets", it actually means "NUMA IDs". As a next
step, we could rename all API parameters to refer to NUMA ID rather than socket
ID - this would break neither API nor ABI, and would instead be a
documentation change in practice.
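
As an illustration of the flag rename in the last bullet above, an
invocation would change roughly as follows (the new spelling is an
assumption for illustration; the patches define the exact flag names):

    # legacy spelling:
    ./dpdk-testpmd -l 0-3 --socket-mem 1024,1024 -- -i
    # NUMA-oriented spelling (assumed name):
    ./dpdk-testpmd -l 0-3 --numa-mem 1024,1024 -- -i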

RFCv1 -> v1:
- Dropped patch 5
- Updated error messages in patch 4 to refer to old flags as well

Anatoly Burakov (4):
  eal: update socket ID API documentation
  lcore: rename socket ID to NUMA ID
  eal: rename socket ID to NUMA ID in internal config
  eal: rename --socket-mem/--socket-limit

 doc/guides/faq/faq.rst                        |  4 +--
 doc/guides/howto/lm_bond_virtio_sriov.rst     |  2 +-
 doc/guides/howto/lm_virtio_vhost_user.rst     |  2 +-
 doc/guides/howto/pvp_reference_benchmark.rst  |  4 +--
 .../virtio_user_for_container_networking.rst  |  2 +-
 doc/guides/linux_gsg/build_sample_apps.rst    | 20 +++++------
 doc/guides/linux_gsg/linux_eal_parameters.rst | 16 ++++-----
 doc/guides/nics/mlx4.rst                      |  2 +-
 doc/guides/nics/mlx5.rst                      |  2 +-
 .../prog_guide/env_abstraction_layer.rst      | 12 +++----
 doc/guides/prog_guide/glossary.rst            |  5 ++-
 doc/guides/prog_guide/multi_proc_support.rst  |  2 +-
 doc/guides/sample_app_ug/bbdev_app.rst        |  6 ++--
 doc/guides/sample_app_ug/ipsec_secgw.rst      |  6 ++--
 doc/guides/sample_app_ug/vdpa.rst             |  2 +-
 doc/guides/sample_app_ug/vhost.rst            |  4 +--
 lib/eal/common/eal_common_dynmem.c            | 14 ++++----
 lib/eal/common/eal_common_lcore.c             | 10 +++---
 lib/eal/common/eal_common_options.c           | 33 ++++++++++---------
 lib/eal/common/eal_common_thread.c            | 12 +++----
 lib/eal/common/eal_internal_cfg.h             | 10 +++---
 lib/eal/common/eal_options.h                  |  8 +++--
 lib/eal/common/eal_private.h                  |  2 +-
 lib/eal/common/malloc_heap.c                  |  2 +-
 lib/eal/freebsd/eal.c                         |  2 +-
 lib/eal/include/rte_lcore.h                   | 25 +++++++-------
 lib/eal/linux/eal.c                           | 28 +++++++++-------
 lib/eal/linux/eal_memory.c                    | 22 ++++++-------
 lib/eal/windows/eal.c                         |  2 +-
 29 files changed, 137 insertions(+), 124 deletions(-)

-- 
2.43.5


^ permalink raw reply	[relevance 3%]

* Re: [PATCH] doc: correct definition of Stats per queue feature
  @ 2024-11-26 23:39  0%   ` Thomas Monjalon
  0 siblings, 0 replies; 169+ results
From: Thomas Monjalon @ 2024-11-26 23:39 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: dev, Shreyansh Jain, John McNamara, Andrew Rybchenko, Ferruh Yigit

11/10/2024 21:25, Ferruh Yigit:
> On 10/11/2024 2:38 AM, Stephen Hemminger wrote:
> > Change the documentation to match current usage of this feature
> > in the NIC table. Moved this sub heading to be after basic
> > stats because the queue stats reported now are in the same structure.
> > 
> > Although the "Stats per Queue" feature was originally intended
> > to be related to stats mapping, the overwhelming majority of drivers
> > report this feature with a different meaning.
> > 
> > Hopefully in later release the per-queue stats limitations
> > can be fixed, but this requires and API, ABI, and lots of driver
> > changes.
> > 
> > Fixes: dad1ec72a377 ("doc: document NIC features")
> > Cc: ferruh.yigit@intel.com
> > Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
> 
> Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>

Applied with spacing fixed, thanks.




^ permalink raw reply	[relevance 0%]

* [PATCH v1] doc: update release notes for 24.11
@ 2024-11-28 17:07  4% John McNamara
  0 siblings, 0 replies; 169+ results
From: John McNamara @ 2024-11-28 17:07 UTC (permalink / raw)
  To: dev; +Cc: thomas, John McNamara

Fix grammar, spelling and formatting of DPDK 24.11 release notes.

Signed-off-by: John McNamara <john.mcnamara@intel.com>
---
 doc/guides/rel_notes/release_24_11.rst | 158 +++++++++++++++----------
 1 file changed, 93 insertions(+), 65 deletions(-)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 48b399cda7..b7e0f1224b 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -57,14 +57,14 @@ New Features
 
 * **Added new bit manipulation API.**
 
-  The support for bit-level operations on single 32- and 64-bit words in
-  <rte_bitops.h> has been extended with semantically well-defined functions.
+  Extended support for bit-level operations on single 32 and 64-bit words in
+  ``<rte_bitops.h>`` with semantically well-defined functions.
 
   * ``rte_bit_[test|set|clear|assign|flip]`` functions provide excellent
     performance (by avoiding restricting the compiler and CPU), but give
-    no guarantees in regards to memory ordering or atomicity.
+    no guarantees in relation to memory ordering or atomicity.
 
-  * ``rte_bit_atomic_*`` provide atomic bit-level operations, including
+  * ``rte_bit_atomic_*`` provides atomic bit-level operations including
     the possibility to specify memory ordering constraints.
 
   The new public API elements are polymorphic, using the _Generic-based
@@ -72,15 +72,17 @@ New Features
 
 * **Added multi-word bitset API.**
 
-  A new multi-word bitset API has been introduced in the EAL.
+  Introduced a new multi-word bitset API to the EAL.
+
   The RTE bitset is optimized for scenarios where the bitset size exceeds the
   capacity of a single word (e.g., larger than 64 bits), but is not large
   enough to justify the overhead and complexity of the more scalable,
-  yet slower, <rte_bitmap.h> API.
+  yet slower, ``<rte_bitmap.h>`` API.
+
   This addition provides an efficient and straightforward alternative
-  for handling bitsets of intermediate sizes.
+  for handling bitsets of intermediate size.
 
-* **Added per-lcore static memory allocation facility.**
+* **Added a per-lcore static memory allocation facility.**
 
   Added EAL API ``<rte_lcore_var.h>`` for statically allocating small,
   frequently-accessed data structures, for which one instance should exist
@@ -89,10 +91,10 @@ New Features
   With lcore variables, data is organized spatially on a per-lcore id basis,
   rather than per library or PMD, avoiding the need for cache aligning
   (or RTE_CACHE_GUARDing) data structures, which in turn
-  reduces CPU cache internal fragmentation, improving performance.
+  reduces CPU cache internal fragmentation and improves performance.
 
   Lcore variables are similar to thread-local storage (TLS, e.g. C11 ``_Thread_local``),
-  but decoupling the values' life time from that of the threads.
+  but decouples the values' life times from those of the threads.
 
 * **Extended service cores statistics.**
 
@@ -101,7 +103,7 @@ New Features
   * ``RTE_SERVICE_ATTR_IDLE_CALL_COUNT`` tracks the number of service function
     invocations where no actual work was performed.
 
-  * ``RTE_SERVICE_ATTR_ERROR_CALL_COUNT`` tracks the number invocations
+  * ``RTE_SERVICE_ATTR_ERROR_CALL_COUNT`` tracks the number of invocations
     resulting in an error.
 
   The new statistics are useful for debugging and profiling.
@@ -110,17 +112,17 @@ New Features
 
   Added function attributes to ``rte_malloc`` and similar functions
   that can catch some obvious bugs at compile time (with GCC 11.0 or later).
-  Examples: calling ``free`` on pointer that was allocated with ``rte_malloc``
-  (and vice versa); freeing the same pointer twice in the same routine;
-  freeing an object that was not created by allocation; etc.
+  For example, calling ``free`` on a pointer that was allocated with ``rte_malloc``
+  (and vice versa); freeing the same pointer twice in the same routine or
+  freeing an object that was not created by allocation.
 
-* **Updated logging library**
+* **Updated logging library.**
 
   * The log subsystem is initialized earlier in startup so all messages go through the library.
 
   * If the application is a systemd service and the log output is being sent to standard error
     then DPDK will switch to journal native protocol.
-    This allows the more data such as severity to be sent.
+    This allows more data such as severity to be sent.
 
   * The syslog option has changed.
     By default, messages are no longer sent to syslog unless the ``--syslog`` option is specified.
@@ -136,7 +138,7 @@ New Features
 
 * **Added more ICMP message types and codes.**
 
-  New ICMP message types and codes from RFC 792 were added in ``rte_icmp.h``.
+  Added new ICMP message types and codes from RFC 792 in ``rte_icmp.h``.
 
 * **Added IPv6 address structure and related utilities.**
 
@@ -154,7 +156,7 @@ New Features
 
 * **Extended flow table index features.**
 
-  * Extended the flow table insertion type enum with
+  * Extended the flow table insertion type enum with the
     ``RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN`` type.
   * Added a function for inserting a flow rule by index with pattern:
     ``rte_flow_async_create_by_index_with_pattern()``.
@@ -171,8 +173,8 @@ New Features
 
   * Modified the PMD API that controls the LLQ header policy.
   * Replaced ``enable_llq``, ``normal_llq_hdr`` and ``large_llq_hdr`` devargs
-    with a new shared devarg ``llq_policy`` that keeps the same logic.
-  * Added validation check for Rx packet descriptor consistency.
+    with a new shared devarg ``llq_policy`` that maintains the same logic.
+  * Added a validation check for Rx packet descriptor consistency.
 
 * **Updated Cisco enic driver.**
 
@@ -187,17 +189,19 @@ New Features
 
   * Updated supported version of the FPGA to 9563.55.49.
   * Extended and fixed logging.
-  * Added NT flow filter initialization.
-  * Added NT flow backend initialization.
-  * Added initialization of FPGA modules related to flow HW offload.
-  * Added basic handling of the virtual queues.
-  * Added flow handling support.
-  * Added statistics support.
-  * Added age flow action support.
-  * Added meter flow metering and flow policy support.
-  * Added flow actions update support.
-  * Added asynchronous flow support.
-  * Added MTU update support.
+  * Added:
+
+    - NT flow filter initialization.
+    - NT flow backend initialization.
+    - Initialization of FPGA modules related to flow HW offload.
+    - Basic handling of the virtual queues.
+    - Flow handling support.
+    - Statistics support.
+    - Age flow action support.
+    - Meter flow metering and flow policy support.
+    - Flow actions update support.
+    - Asynchronous flow support.
+    - MTU update support.
 
 * **Updated NVIDIA mlx5 net driver.**
 
@@ -211,9 +215,10 @@ New Features
 
 * **Added ZTE zxdh net driver [EXPERIMENTAL].**
 
-  Added ethdev driver support for zxdh NX Series Ethernet Controller.
+  Added ethdev driver support for the zxdh NX Series Ethernet Controller.
+  This has:
 
-  * Ability to initialize the NIC.
+  * The ability to initialize the NIC.
   * No datapath support.
 
 * **Added cryptodev queue pair reset support.**
@@ -232,9 +237,9 @@ New Features
 
 * **Updated IPsec_MB crypto driver.**
 
-  * Added support for SM3 algorithm.
-  * Added support for SM3 HMAC algorithm.
-  * Added support for SM4 CBC, SM4 ECB and SM4 CTR algorithms.
+  * Added support for the SM3 algorithm.
+  * Added support for the SM3 HMAC algorithm.
+  * Added support for the SM4 CBC, SM4 ECB and SM4 CTR algorithms.
   * Bumped the minimum version requirement of Intel IPsec Multi-buffer library to v1.4.
     Affected PMDs: KASUMI, SNOW3G, ZUC, AESNI GCM, AESNI MB and CHACHAPOLY.
 
@@ -264,7 +269,7 @@ New Features
 * **Added Marvell cnxk RVU LF rawdev driver.**
 
   Added a new raw device driver for Marvell cnxk based devices
-  to allow out-of-tree driver to manage RVU LF device.
+  to allow an out-of-tree driver to manage an RVU LF device.
   It enables operations such as sending/receiving mailbox,
   register and notify the interrupts, etc.
 
@@ -286,7 +291,7 @@ New Features
 
   Added support for independent enqueue feature.
   With this feature eventdev supports enqueue in any order
-  or specifically in a different order than dequeue.
+  or specifically in a different order to dequeue.
   The feature is intended for eventdevs supporting burst mode.
   Applications should use ``RTE_EVENT_PORT_CFG_INDEPENDENT_ENQ`` to enable
   the feature if the capability ``RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ`` exists.
@@ -305,8 +310,8 @@ New Features
 
 * **Added IPv4 network order lookup in the FIB library.**
 
-  A new flag field is introduced in ``rte_fib_conf`` structure.
-  This field is used to pass an extra configuration settings such as ability
+  A new flag field is introduced in the ``rte_fib_conf`` structure.
+  This field is used to pass extra configuration settings, such as the ability
   to lookup IPv4 addresses in network byte order.
 
 * **Added RSS hash key generating API.**
@@ -317,7 +322,7 @@ New Features
 * **Added per-CPU power management QoS interface.**
 
   Added per-CPU PM QoS interface to lower the resume latency
-  when wake up from idle state.
+  when waking up from idle state.
 
 * **Added new API to register telemetry endpoint callbacks with private arguments.**
 
@@ -326,7 +331,7 @@ New Features
 
 * **Added node specific statistics.**
 
-  Added ability for node to advertise and update multiple xstat counters,
+  Added the ability for a node to advertise and update multiple xstat counters,
   that can be retrieved using ``rte_graph_cluster_stats_get``.
 
 
@@ -342,7 +347,7 @@ Removed Items
    Also, make sure to start the actual text at the margin.
    =======================================================
 
-* ethdev: Removed the __rte_ethdev_trace_rx_burst symbol, as the corresponding
+* ethdev: Removed the ``__rte_ethdev_trace_rx_burst`` symbol, as the corresponding
   tracepoint was split into two separate ones for empty and non-empty calls.
 
 
@@ -363,8 +368,8 @@ API Changes
 
 * kvargs: reworked the process API.
 
-  * The already existing ``rte_kvargs_process`` now only handles key=value cases and
-    rejects if only a key is present in the parsed string.
+  * The already existing ``rte_kvargs_process`` now only handles ``key=value`` cases and
+    rejects input where only a key is present in the parsed string.
   * ``rte_kvargs_process_opt`` has been added to behave as ``rte_kvargs_process`` in previous
     releases: it handles key=value and only-key cases.
   * Both ``rte_kvargs_process`` and ``rte_kvargs_process_opt`` reject a NULL ``kvlist`` parameter.
@@ -381,24 +386,35 @@ API Changes
 * net: A new IPv6 address structure was introduced to replace ad-hoc ``uint8_t[16]`` arrays.
   The following libraries and symbols were modified:
 
-  cmdline
+  - cmdline:
+
     - ``cmdline_ipaddr_t``
-  ethdev
+
+  - ethdev:
+
     - ``struct rte_flow_action_set_ipv6``
     - ``struct rte_flow_item_icmp6_nd_na``
     - ``struct rte_flow_item_icmp6_nd_ns``
     - ``struct rte_flow_tunnel``
-  fib
+
+  - fib:
+
     - ``rte_fib6_add()``
     - ``rte_fib6_delete()``
     - ``rte_fib6_lookup_bulk()``
     - ``RTE_FIB6_IPV6_ADDR_SIZE`` (deprecated, replaced with ``RTE_IPV6_ADDR_SIZE``)
     - ``RTE_FIB6_MAXDEPTH`` (deprecated, replaced with ``RTE_IPV6_MAX_DEPTH``)
-  hash
+
+  - hash:
+
     - ``struct rte_ipv6_tuple``
-  ipsec
+
+  - ipsec:
+
     - ``struct rte_ipsec_sadv6_key``
-  lpm
+
+  - lpm:
+
     - ``rte_lpm6_add()``
     - ``rte_lpm6_delete()``
     - ``rte_lpm6_delete_bulk_func()``
@@ -407,20 +423,32 @@ API Changes
     - ``rte_lpm6_lookup_bulk_func()``
     - ``RTE_LPM6_IPV6_ADDR_SIZE`` (deprecated, replaced with ``RTE_IPV6_ADDR_SIZE``)
     - ``RTE_LPM6_MAX_DEPTH`` (deprecated, replaced with ``RTE_IPV6_MAX_DEPTH``)
-  net
+
+  - net:
+
     - ``struct rte_ipv6_hdr``
-  node
+
+  - node:
+
     - ``rte_node_ip6_route_add()``
-  pipeline
+
+  - pipeline:
+
     - ``struct rte_swx_ipsec_sa_encap_params``
     - ``struct rte_table_action_ipv6_header``
     - ``struct rte_table_action_nat_params``
-  security
+
+  - security:
+
     - ``struct rte_security_ipsec_tunnel_param``
-  table
+
+  - table:
+
     - ``struct rte_table_lpm_ipv6_key``
     - ``RTE_LPM_IPV6_ADDR_SIZE`` (deprecated, replaced with ``RTE_IPV6_ADDR_SIZE``)
-  rib
+
+  - rib:
+
     - ``rte_rib6_get_ip()``
     - ``rte_rib6_get_nxt()``
     - ``rte_rib6_insert()``
@@ -452,7 +480,7 @@ ABI Changes
    =======================================================
 
 * eal: The maximum number of file descriptors that can be passed to a secondary process
-  has been increased from 8 to 253 (which is the maximum possible with Unix domain socket).
+  has been increased from 8 to 253 (which is the maximum possible with Unix domain sockets).
   This allows for more queues when using software devices such as TAP and XDP.
 
 * ethdev: Added ``filter`` and ``names`` fields to ``rte_dev_reg_info`` structure
@@ -468,25 +496,25 @@ ABI Changes
 * cryptodev: The enum ``rte_crypto_asym_xform_type`` and struct ``rte_crypto_asym_op``
   are updated to include new values to support EdDSA.
 
-* cryptodev: The ``rte_crypto_rsa_xform`` struct member to hold private key
-  in either exponent or quintuple format is changed from union to struct data type.
+* cryptodev: The ``rte_crypto_rsa_xform`` struct member to hold private key data
+  in either exponent or quintuple format is changed from a union to a struct data type.
   This change is to support ASN.1 syntax (RFC 3447 Appendix A.1.2).
 
 * cryptodev: The padding struct ``rte_crypto_rsa_padding`` is moved
   from ``rte_crypto_rsa_op_param`` to ``rte_crypto_rsa_xform``
   as the padding information is part of session creation
-  instead of per packet crypto operation.
+  instead of the per packet crypto operation.
   This change is required to support virtio-crypto specifications.
 
 * bbdev: The structure ``rte_bbdev_stats`` was updated to add a new parameter
-  to optionally report the number of enqueue batch available ``enqueue_depth_avail``.
+  to optionally report the number of enqueue batches available ``enqueue_depth_avail``.
 
-* dmadev: Added ``nb_priorities`` field to ``rte_dma_info`` structure
-  and ``priority`` field to ``rte_dma_conf`` structure
+* dmadev: Added ``nb_priorities`` field to the ``rte_dma_info`` structure
+  and ``priority`` field to the ``rte_dma_conf`` structure
   to get device supported priority levels
   and configure required priority from the application.
 
-* eventdev: Added ``preschedule_type`` field to ``rte_event_dev_config`` structure.
+* eventdev: Added the ``preschedule_type`` field to ``rte_event_dev_config`` structure.
 
 * eventdev: Removed the single-event enqueue and dequeue function pointers
   from ``rte_event_fp_fps``.
-- 
2.34.1


^ permalink raw reply	[relevance 4%]

* DPDK 24.11 released
@ 2024-11-30 23:50  4% Thomas Monjalon
  0 siblings, 0 replies; 169+ results
From: Thomas Monjalon @ 2024-11-30 23:50 UTC (permalink / raw)
  To: announce

A new major release is available:
	https://fast.dpdk.org/rel/dpdk-24.11.tar.xz

It was a busy release cycle:
	1329 commits from 196 authors
	2557 files changed, 376587 insertions(+), 177108 deletions(-)

And it includes some API/ABI compatibility breakages.
This release won't be ABI-compatible with previous ones.
The new major ABI version is 25.
The next releases 25.03 and 25.07 will be ABI-compatible with 24.11.

The branch 24.11 should be supported for three years,
making it recommended for system integration and deployment.

Highlights of 24.11:
	- lcore variables allocation
	- bit set and atomic bit manipulation
	- AMD uncore power management
	- per-CPU power management QoS for resume latency
	- IPv6 address API
	- RSS hash key generation
	- Ethernet link lanes
	- flow table index action
	- Cisco enic VF
	- Marvell CN20K
	- Napatech ntnic flow engine
	- Realtek r8169 driver
	- ZTE gdtc / zxdh driver initialization
	- symmetric crypto SM4
	- asymmetric crypto EdDSA
	- event device pre-scheduling
	- event device independent enqueue
	- logging rework (timestamp, color, syslog, journal)

More details in the release notes:
	https://doc.dpdk.org/guides/rel_notes/release_24_11.html


There are 50 new contributors (including authors, reviewers and testers).
Welcome to Adel Belkhiri, Ahmed Zaki, Andre Muezerie, Andrzej Wilczynski,
Bartosz Jakub Rosadzinski, Bill Xiang, Chenxingyu Wang,
Danylo Vodopianov,  Dhruv Tripathi, Doug Foster, Gur Stavi, Hanxiao Li,
Howard Wang, Huaxing Zhu, Julien Hascoet, Jun Zhang, Junlong Wang,
Kiran Kumar Kokkilagadda, Luka Jankovic, Lukas Sismis, Lukasz Cieplicki,
Malcolm Bumgardner, Mateusz Polchlopek, Michal Jaron, Michal Nowak,
Midde Ajijur Rehaman, Mihai Brodschi, Niall Meade, Norbert Zulinski,
Ofer Dagan, Oleg Akhrem, Oleksandr Nahnybida, Peter Morrow,
Praveen Kaligineedi, Przemyslaw Gierszynski, Rogelio Domínguez Hernández,
Sangtani Parag Satishbhai, Slawomir Laba, Stefan Laesser,
Sudheer Mogilappagari, Thomas Wilks, Tim Martin, Tomáš Ďurovec,
Varun Lakkur Ambaji Rao, Vasuthevan Maheswaran, Vinod Krishna,
Wojciech Panfil, Xinying Yu, Yogesh Bhosale, and Yong Zhang.

Below is the number of commits per employer (with authors count):
	221     Intel (60)
	158     Marvell (22)
	137     Napatech (3)
	107     stephen@networkplumber.org (1)
	 96     NVIDIA (15)
	 93     NXP (10)
	 85     Red Hat (3)
	 70     Corigine (9)
	 70     Broadcom (15)
	 54     Huawei (6)
	 34     Arm (5)
	 30     Ericsson (1)
	        ...

A big thank you to all the courageous people who took on the unrewarding
task of reviewing others' work.
Based on Reviewed-by and Acked-by tags, the top non-PMD reviewers are:
	 73     Morten Brørup <mb@smartsharesystems.com>
	 66     Stephen Hemminger <stephen@networkplumber.org>
	 57     Chengwen Feng <fengchengwen@huawei.com>
	 41     Bruce Richardson <bruce.richardson@intel.com>
	 31     Luca Vizzarro <luca.vizzarro@arm.com>
	 26     Ferruh Yigit <ferruh.yigit@amd.com>
	 26     David Marchand <david.marchand@redhat.com>


The next version will be 25.03 in March.
The new features for 25.03 can be submitted during December:
	http://core.dpdk.org/roadmap#dates
Please share your roadmap.


Thanks everyone



^ permalink raw reply	[relevance 4%]

* Re: [PATCH v2 1/3] net: add thread-safe crc api
  @ 2024-12-02 22:36  3%   ` Stephen Hemminger
  0 siblings, 0 replies; 169+ results
From: Stephen Hemminger @ 2024-12-02 22:36 UTC (permalink / raw)
  To: Arkadiusz Kusztal; +Cc: dev, ferruh.yigit, kai.ji, brian.dooley

On Tue,  1 Oct 2024 19:11:48 +0100
Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com> wrote:

> The current net CRC API is not thread-safe, this patch
> solves this by adding another, thread-safe API functions.

Couldn't the old API be made thread-safe with TLS?
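
(Sketch of the TLS idea, using the existing per-lcore macros from
rte_per_lcore.h; crc_fn_t and the handler name are stand-ins, not real
DPDK symbols:)

    typedef uint32_t (*crc_fn_t)(const uint8_t *data, uint32_t data_len);

    /* one handler pointer per thread instead of a shared global */
    static RTE_DEFINE_PER_LCORE(crc_fn_t, crc_handler);

    static void
    crc_set_alg(crc_fn_t fn)
    {
        RTE_PER_LCORE(crc_handler) = fn; /* no lock needed: TLS */
    }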

> This API is also safe to use across multiple processes,
> yet with limitations on max-simd-bitwidth, which will be checked only by
> the process that created the CRC context; all other processes will use
> the same CRC function when used with the same CRC context.
> It is an undefined behavior when process binaries are compiled
> with different SIMD capabilities when the same CRC context is used.
> 
> Signed-off-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>

The API/ABI can't change for 25.03; do you want to support both,
or wait until 25.11?

^ permalink raw reply	[relevance 3%]

* [PATCH] version: 25.03-rc0
@ 2024-12-03  7:54 11% David Marchand
  2024-12-04 10:06  3% ` Thomas Monjalon
  0 siblings, 1 reply; 169+ results
From: David Marchand @ 2024-12-03  7:54 UTC (permalink / raw)
  To: dev; +Cc: thomas, Aaron Conole, Michael Santana

Start a new release cycle with empty release notes.
Bump version and ABI minor.
Bump libabigail from 2.4 to 2.6 and enable ABI checks.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 .github/workflows/build.yml            |   8 +-
 ABI_VERSION                            |   2 +-
 VERSION                                |   2 +-
 doc/guides/rel_notes/index.rst         |   1 +
 doc/guides/rel_notes/release_25_03.rst | 138 +++++++++++++++++++++++++
 5 files changed, 145 insertions(+), 6 deletions(-)
 create mode 100644 doc/guides/rel_notes/release_25_03.rst

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index d99700b6e9..dcafb4a8f5 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -12,7 +12,7 @@ defaults:
 env:
   REF_GIT_BRANCH: main
   REF_GIT_REPO: https://dpdk.org/git/dpdk
-  REF_GIT_TAG: none
+  REF_GIT_TAG: v24.11
 
 jobs:
   checkpatch:
@@ -46,7 +46,7 @@ jobs:
       BUILD_EXAMPLES: ${{ contains(matrix.config.checks, 'examples') }}
       CC: ccache ${{ matrix.config.compiler }}
       DEF_LIB: ${{ matrix.config.library }}
-      LIBABIGAIL_VERSION: libabigail-2.4
+      LIBABIGAIL_VERSION: libabigail-2.6
       MINGW: ${{ matrix.config.cross == 'mingw' }}
       MINI: ${{ matrix.config.mini != '' }}
       PPC64LE: ${{ matrix.config.cross == 'ppc64le' }}
@@ -69,7 +69,7 @@ jobs:
             checks: stdatomic
           - os: ubuntu-22.04
             compiler: gcc
-            checks: debug+doc+examples+tests
+            checks: abi+debug+doc+examples+tests
           - os: ubuntu-22.04
             compiler: clang
             checks: asan+doc+tests
@@ -133,7 +133,7 @@ jobs:
         python3-pyelftools python3-setuptools python3-wheel zlib1g-dev
     - name: Install libabigail build dependencies if no cache is available
       if: env.ABI_CHECKS == 'true' && steps.libabigail-cache.outputs.cache-hit != 'true'
-      run: sudo apt install -y autoconf automake libdw-dev libtool libxml2-dev
+      run: sudo apt install -y autoconf automake libdw-dev libtool libxml2-dev libxxhash-dev
     - name: Install i386 cross compiling packages
       if: env.BUILD_32BIT == 'true'
       run: sudo apt install -y gcc-multilib g++-multilib libnuma-dev:i386
diff --git a/ABI_VERSION b/ABI_VERSION
index be8e64f5a3..8b9bee5b58 100644
--- a/ABI_VERSION
+++ b/ABI_VERSION
@@ -1 +1 @@
-25.0
+25.1
diff --git a/VERSION b/VERSION
index 0a492611a0..04a8405dad 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-24.11.0
+25.03.0-rc0
diff --git a/doc/guides/rel_notes/index.rst b/doc/guides/rel_notes/index.rst
index 74ddae3e81..fc0309113e 100644
--- a/doc/guides/rel_notes/index.rst
+++ b/doc/guides/rel_notes/index.rst
@@ -8,6 +8,7 @@ Release Notes
     :maxdepth: 1
     :numbered:
 
+    release_25_03
     release_24_11
     release_24_07
     release_24_03
diff --git a/doc/guides/rel_notes/release_25_03.rst b/doc/guides/rel_notes/release_25_03.rst
new file mode 100644
index 0000000000..426dfcd982
--- /dev/null
+++ b/doc/guides/rel_notes/release_25_03.rst
@@ -0,0 +1,138 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2024 The DPDK contributors
+
+.. include:: <isonum.txt>
+
+DPDK Release 25.03
+==================
+
+.. **Read this first.**
+
+   The text in the sections below explains how to update the release notes.
+
+   Use proper spelling, capitalization and punctuation in all sections.
+
+   Variable and config names should be quoted as fixed width text:
+   ``LIKE_THIS``.
+
+   Build the docs and view the output file to ensure the changes are correct::
+
+      ninja -C build doc
+      xdg-open build/doc/guides/html/rel_notes/release_25_03.html
+
+
+New Features
+------------
+
+.. This section should contain new features added in this release.
+   Sample format:
+
+   * **Add a title in the past tense with a full stop.**
+
+     Add a short 1-2 sentence description in the past tense.
+     The description should be enough to allow someone scanning
+     the release notes to understand the new feature.
+
+     If the feature adds a lot of sub-features you can use a bullet list
+     like this:
+
+     * Added feature foo to do something.
+     * Enhanced feature bar to do something else.
+
+     Refer to the previous release notes for examples.
+
+     Suggested order in release notes items:
+     * Core libs (EAL, mempool, ring, mbuf, buses)
+     * Device abstraction libs and PMDs (ordered alphabetically by vendor name)
+       - ethdev (lib, PMDs)
+       - cryptodev (lib, PMDs)
+       - eventdev (lib, PMDs)
+       - etc
+     * Other libs
+     * Apps, Examples, Tools (if significant)
+
+     This section is a comment. Do not overwrite or remove it.
+     Also, make sure to start the actual text at the margin.
+     =======================================================
+
+
+Removed Items
+-------------
+
+.. This section should contain removed items in this release. Sample format:
+
+   * Add a short 1-2 sentence description of the removed item
+     in the past tense.
+
+   This section is a comment. Do not overwrite or remove it.
+   Also, make sure to start the actual text at the margin.
+   =======================================================
+
+
+API Changes
+-----------
+
+.. This section should contain API changes. Sample format:
+
+   * sample: Add a short 1-2 sentence description of the API change
+     which was announced in the previous releases and made in this release.
+     Start with a scope label like "ethdev:".
+     Use fixed width quotes for ``function_names`` or ``struct_names``.
+     Use the past tense.
+
+   This section is a comment. Do not overwrite or remove it.
+   Also, make sure to start the actual text at the margin.
+   =======================================================
+
+
+ABI Changes
+-----------
+
+.. This section should contain ABI changes. Sample format:
+
+   * sample: Add a short 1-2 sentence description of the ABI change
+     which was announced in the previous releases and made in this release.
+     Start with a scope label like "ethdev:".
+     Use fixed width quotes for ``function_names`` or ``struct_names``.
+     Use the past tense.
+
+   This section is a comment. Do not overwrite or remove it.
+   Also, make sure to start the actual text at the margin.
+   =======================================================
+
+* No ABI change that would break compatibility with 24.11.
+
+
+Known Issues
+------------
+
+.. This section should contain new known issues in this release. Sample format:
+
+   * **Add title in present tense with full stop.**
+
+     Add a short 1-2 sentence description of the known issue
+     in the present tense. Add information on any known workarounds.
+
+   This section is a comment. Do not overwrite or remove it.
+   Also, make sure to start the actual text at the margin.
+   =======================================================
+
+
+Tested Platforms
+----------------
+
+.. This section should contain a list of platforms that were tested
+   with this release.
+
+   The format is:
+
+   * <vendor> platform with <vendor> <type of devices> combinations
+
+     * List of CPU
+     * List of OS
+     * List of devices
+     * Other relevant details...
+
+   This section is a comment. Do not overwrite or remove it.
+   Also, make sure to start the actual text at the margin.
+   =======================================================
-- 
2.47.0


^ permalink raw reply	[relevance 11%]

* Re: [RFC v3 2/2] ethdev: introduce the cache stashing hints API
  @ 2024-12-03 21:13  3%     ` Stephen Hemminger
  2024-12-05 15:40  3%       ` David Marchand
  0 siblings, 1 reply; 169+ results
From: Stephen Hemminger @ 2024-12-03 21:13 UTC (permalink / raw)
  To: Wathsala Vithanage
  Cc: Thomas Monjalon, Ferruh Yigit, Andrew Rybchenko, dev, nd,
	Honnappa Nagarahalli, Dhruv Tripathi

On Mon, 21 Oct 2024 01:52:46 +0000
Wathsala Vithanage <wathsala.vithanage@arm.com> wrote:

> Extend the ethdev library to enable the stashing of different data
> objects, such as the ones listed below, into CPU caches directly
> from the NIC.
> 
> - Rx/Tx queue descriptors
> - Rx packets
> - Packet headers
> - packet payloads
> - Data of a packet at an offset from the start of the packet
> 
> The APIs are designed in a hardware/vendor agnostic manner such that
> supporting PMDs could use any capabilities available in the underlying
> hardware for fine-grained stashing of data objects into a CPU cache
> (e.g., Steering Tags int PCIe TLP Processing Hints).
> 
> The API provides an interface to query the availability of stashing
> capabilities, i.e., platform/NIC support, stashable object types, etc,
> via the rte_eth_dev_stashing_capabilities_get interface.
> 
> The function pair rte_eth_dev_stashing_rx_config_set and
> rte_eth_dev_stashing_tx_config_set sets the stashing hint (the CPU, 
> cache level, and data object types) on the Rx and Tx queues.
> 
> PMDs that support stashing must register their implementations with the
> following eth_dev_ops callbacks, which are invoked by the ethdev
> functions listed above.
> 
> - stashing_capabilities_get
> - stashing_rx_hints_set
> - stashing_tx_hints_set
> 
> Signed-off-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
> Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Reviewed-by: Dhruv Tripathi <dhruv.tripathi@arm.com>
> 
> ---
>  lib/ethdev/ethdev_driver.h |  66 +++++++++++++++
>  lib/ethdev/rte_ethdev.c    | 120 +++++++++++++++++++++++++++
>  lib/ethdev/rte_ethdev.h    | 161 +++++++++++++++++++++++++++++++++++++
>  lib/ethdev/version.map     |   4 +
>  4 files changed, 351 insertions(+)
> 
> diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
> index 1fd4562b40..7caaea54a8 100644
> --- a/lib/ethdev/ethdev_driver.h
> +++ b/lib/ethdev/ethdev_driver.h
> @@ -1367,6 +1367,68 @@ enum rte_eth_dev_operation {
>  typedef uint64_t (*eth_get_restore_flags_t)(struct rte_eth_dev *dev,
>  					    enum rte_eth_dev_operation op);
>  
> +/**
> + * @internal
> + * Set cache stashing hints in Rx queue.
> + *
> + * @param dev
> + *   Port (ethdev) handle.
> + * @param queue_id
> + *   Rx queue.
> + * @param config
> + *   Stashing hints configuration for the queue.
> + *
> + * @return
> + *   -ENOTSUP if the device or the platform does not support cache stashing.
> + *   -ENOSYS  if the underlying PMD hasn't implemented cache stashing feature.
> + *   -EINVAL  on invalid arguments.
> + *   0 on success.
> + */
> +typedef int (*eth_stashing_rx_hints_set_t)(struct rte_eth_dev *dev, uint16_t queue_id,
> +					   struct rte_eth_stashing_config *config);
> +
> +/**
> + * @internal
> + * Set cache stashing hints in Tx queue.
> + *
> + * @param dev
> + *   Port (ethdev) handle.
> + * @param queue_id
> + *   Tx queue.
> + * @param config
> + *   Stashing hints configuration for the queue.
> + *
> + * @return
> + *   -ENOTSUP if the device or the platform does not support cache stashing.
> + *   -ENOSYS  if the underlying PMD hasn't implemented cache stashing feature.
> + *   -EINVAL  on invalid arguments.
> + *   0 on success.
> + */
> +typedef int (*eth_stashing_tx_hints_set_t)(struct rte_eth_dev *dev, uint16_t queue_id,
> +					   struct rte_eth_stashing_config *config);
> +
> +/**
> + * @internal
> + * Get cache stashing object types supported in the ethernet device.
> + * The return value indicates availability of stashing hints support
> + * in the hardware and the PMD.
> + *
> + * @param dev
> + *   Port (ethdev) handle.
> + * @param objects
> + *   PMD sets supported bits on return.
> + *
> + * @return
> + *   -ENOTSUP if the device or the platform does not support cache stashing.
> + *   -ENOSYS  if the underlying PMD hasn't implemented cache stashing feature.
> + *   -EINVAL  on NULL values for types or hints parameters.
> + *   On return, types and hints parameters will have bits set for supported
> + *   object types and hints.
> + *   0 on success.
> + */
> +typedef int (*eth_stashing_capabilities_get_t)(struct rte_eth_dev *dev,
> +					     uint16_t *objects);
> +
>  /**
>   * @internal A structure containing the functions exported by an Ethernet driver.
>   */
> @@ -1393,6 +1455,10 @@ struct eth_dev_ops {
>  	eth_mac_addr_remove_t      mac_addr_remove; /**< Remove MAC address */
>  	eth_mac_addr_add_t         mac_addr_add;  /**< Add a MAC address */
>  	eth_mac_addr_set_t         mac_addr_set;  /**< Set a MAC address */
> +	eth_stashing_rx_hints_set_t   stashing_rx_hints_set; /**< Set Rx cache stashing*/
> +	eth_stashing_tx_hints_set_t   stashing_tx_hints_set; /**< Set Tx cache stashing*/
> +	/** Get supported stashing hints*/
> +	eth_stashing_capabilities_get_t stashing_capabilities_get;
>  	/** Set list of multicast addresses */
>  	eth_set_mc_addr_list_t     set_mc_addr_list;
>  	mtu_set_t                  mtu_set;       /**< Set MTU */

Since eth_dev_ops is visible in the application binary, it is part of the ABI.
Therefore it cannot be changed until the 25.11 release.
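
(For readers skimming the thread: per the quoted cover letter, the
application-facing calls would be used roughly as below. Only the function
and struct names come from the RFC; the field and flag names here are
assumptions for illustration.)

    struct rte_eth_stashing_config cfg = {
        .lcore_id    = 2,  /* assumed field: CPU to stash towards */
        .cache_level = 2,  /* assumed field: e.g. stash into L2 */
        .objects     = RTE_ETH_STASH_OBJECT_PAYLOAD, /* assumed flag */
    };
    int ret = rte_eth_dev_stashing_rx_config_set(port_id, queue_id, &cfg);
    if (ret == -ENOTSUP || ret == -ENOSYS)
        ; /* platform/NIC lacks support, or the PMD does not implement it */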


^ permalink raw reply	[relevance 3%]

* Re: [PATCH] version: 25.03-rc0
  2024-12-03  7:54 11% [PATCH] version: 25.03-rc0 David Marchand
@ 2024-12-04 10:06  3% ` Thomas Monjalon
  2024-12-04 12:05  3%   ` David Marchand
  0 siblings, 1 reply; 169+ results
From: Thomas Monjalon @ 2024-12-04 10:06 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, Aaron Conole, Michael Santana

03/12/2024 08:54, David Marchand:
> Start a new release cycle with empty release notes.
> Bump version and ABI minor.
> Bump libabigail from 2.4 to 2.6 and enable ABI checks.
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>

Added a note about the new libabigail which will allow us
to split a library (like EAL) without having warnings.

Applied, so a new release cycle is started!

Note to all branch maintainers: please rebase on this commit
and enable ABI checks in your local configuration.
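
(One way to do that locally is via the environment variables consumed by
devtools/test-meson-builds.sh; a sketch, with a placeholder path:)

    export DPDK_ABI_REF_VERSION=v24.11
    export DPDK_ABI_REF_DIR=$HOME/dpdk-abi-refs   # placeholder path
    ./devtools/test-meson-builds.sh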

Happy 25.03 :)



^ permalink raw reply	[relevance 3%]

* Re: [PATCH] version: 25.03-rc0
  2024-12-04 10:06  3% ` Thomas Monjalon
@ 2024-12-04 12:05  3%   ` David Marchand
  0 siblings, 0 replies; 169+ results
From: David Marchand @ 2024-12-04 12:05 UTC (permalink / raw)
  To: dpdklab, Patrick Robb
  Cc: dev, Aaron Conole, Michael Santana, ci, Thomas Monjalon

On Wed, Dec 4, 2024 at 11:06 AM Thomas Monjalon <thomas@monjalon.net> wrote:
>
> 03/12/2024 08:54, David Marchand:
> > Start a new release cycle with empty release notes.
> > Bump version and ABI minor.
> > Bump libabigail from 2.4 to 2.6 and enable ABI checks.
> >
> > Signed-off-by: David Marchand <david.marchand@redhat.com>
> Acked-by: Thomas Monjalon <thomas@monjalon.net>
>
> Added a note about the new libabigail which will allow us
> to split a library (like EAL) without having warnings.
>
> Applied, so a new release cycle is started!
>
> Note to all branch maintainers: please rebase on this commit
> and enable ABI checks in your local configuration.
>
> Happy 25.03 :)

Time to re-enable ABI checks in CI too (please note that the libabigail
version has been bumped).


-- 
David Marchand


^ permalink raw reply	[relevance 3%]

* Re: [RFC v3 2/2] ethdev: introduce the cache stashing hints API
  2024-12-03 21:13  3%     ` Stephen Hemminger
@ 2024-12-05 15:40  3%       ` David Marchand
  2024-12-05 21:00  0%         ` Stephen Hemminger
  0 siblings, 1 reply; 169+ results
From: David Marchand @ 2024-12-05 15:40 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: Wathsala Vithanage, Thomas Monjalon, Ferruh Yigit,
	Andrew Rybchenko, dev, nd, Honnappa Nagarahalli, Dhruv Tripathi

On Tue, Dec 3, 2024 at 10:13 PM Stephen Hemminger
<stephen@networkplumber.org> wrote:
>
> On Mon, 21 Oct 2024 01:52:46 +0000
> Wathsala Vithanage <wathsala.vithanage@arm.com> wrote:
>
> > Extend the ethdev library to enable the stashing of different data
> > objects, such as the ones listed below, into CPU caches directly
> > from the NIC.
> >
> > - Rx/Tx queue descriptors
> > - Rx packets
> > - Packet headers
> > - packet payloads
> > - Data of a packet at an offset from the start of the packet
> >
> > The APIs are designed in a hardware/vendor agnostic manner such that
> > supporting PMDs could use any capabilities available in the underlying
> > hardware for fine-grained stashing of data objects into a CPU cache
> > (e.g., Steering Tags int PCIe TLP Processing Hints).
> >
> > The API provides an interface to query the availability of stashing
> > capabilities, i.e., platform/NIC support, stashable object types, etc,
> > via the rte_eth_dev_stashing_capabilities_get interface.
> >
> > The function pair rte_eth_dev_stashing_rx_config_set and
> > rte_eth_dev_stashing_tx_config_set sets the stashing hint (the CPU,
> > cache level, and data object types) on the Rx and Tx queues.
> >
> > PMDs that support stashing must register their implementations with the
> > following eth_dev_ops callbacks, which are invoked by the ethdev
> > functions listed above.
> >
> > - stashing_capabilities_get
> > - stashing_rx_hints_set
> > - stashing_tx_hints_set
> >
> > Signed-off-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
> > Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > Reviewed-by: Dhruv Tripathi <dhruv.tripathi@arm.com>
> >
> > ---
> >  lib/ethdev/ethdev_driver.h |  66 +++++++++++++++
> >  lib/ethdev/rte_ethdev.c    | 120 +++++++++++++++++++++++++++
> >  lib/ethdev/rte_ethdev.h    | 161 +++++++++++++++++++++++++++++++++++++
> >  lib/ethdev/version.map     |   4 +
> >  4 files changed, 351 insertions(+)
> >
> > diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
> > index 1fd4562b40..7caaea54a8 100644
> > --- a/lib/ethdev/ethdev_driver.h
> > +++ b/lib/ethdev/ethdev_driver.h
> > @@ -1367,6 +1367,68 @@ enum rte_eth_dev_operation {
> >  typedef uint64_t (*eth_get_restore_flags_t)(struct rte_eth_dev *dev,
> >                                           enum rte_eth_dev_operation op);
> >
> > +/**
> > + * @internal
> > + * Set cache stashing hints in Rx queue.
> > + *
> > + * @param dev
> > + *   Port (ethdev) handle.
> > + * @param queue_id
> > + *   Rx queue.
> > + * @param config
> > + *   Stashing hints configuration for the queue.
> > + *
> > + * @return
> > + *   -ENOTSUP if the device or the platform does not support cache stashing.
> > + *   -ENOSYS  if the underlying PMD hasn't implemented cache stashing feature.
> > + *   -EINVAL  on invalid arguments.
> > + *   0 on success.
> > + */
> > +typedef int (*eth_stashing_rx_hints_set_t)(struct rte_eth_dev *dev, uint16_t queue_id,
> > +                                        struct rte_eth_stashing_config *config);
> > +
> > +/**
> > + * @internal
> > + * Set cache stashing hints in Tx queue.
> > + *
> > + * @param dev
> > + *   Port (ethdev) handle.
> > + * @param queue_id
> > + *   Tx queue.
> > + * @param config
> > + *   Stashing hints configuration for the queue.
> > + *
> > + * @return
> > + *   -ENOTSUP if the device or the platform does not support cache stashing.
> > + *   -ENOSYS  if the underlying PMD hasn't implemented cache stashing feature.
> > + *   -EINVAL  on invalid arguments.
> > + *   0 on success.
> > + */
> > +typedef int (*eth_stashing_tx_hints_set_t)(struct rte_eth_dev *dev, uint16_t queue_id,
> > +                                        struct rte_eth_stashing_config *config);
> > +
> > +/**
> > + * @internal
> > + * Get cache stashing object types supported in the ethernet device.
> > + * The return value indicates availability of stashing hints support
> > + * in the hardware and the PMD.
> > + *
> > + * @param dev
> > + *   Port (ethdev) handle.
> > + * @param objects
> > + *   PMD sets supported bits on return.
> > + *
> > + * @return
> > + *   -ENOTSUP if the device or the platform does not support cache stashing.
> > + *   -ENOSYS  if the underlying PMD hasn't implemented cache stashing feature.
> > + *   -EINVAL  on NULL values for types or hints parameters.
> > + *   On return, types and hints parameters will have bits set for supported
> > + *   object types and hints.
> > + *   0 on success.
> > + */
> > +typedef int (*eth_stashing_capabilities_get_t)(struct rte_eth_dev *dev,
> > +                                          uint16_t *objects);
> > +
> >  /**
> >   * @internal A structure containing the functions exported by an Ethernet driver.
> >   */
> > @@ -1393,6 +1455,10 @@ struct eth_dev_ops {
> >       eth_mac_addr_remove_t      mac_addr_remove; /**< Remove MAC address */
> >       eth_mac_addr_add_t         mac_addr_add;  /**< Add a MAC address */
> >       eth_mac_addr_set_t         mac_addr_set;  /**< Set a MAC address */
> > +     eth_stashing_rx_hints_set_t   stashing_rx_hints_set; /**< Set Rx cache stashing*/
> > +     eth_stashing_tx_hints_set_t   stashing_tx_hints_set; /**< Set Tx cache stashing*/
> > +     /** Get supported stashing hints*/
> > +     eth_stashing_capabilities_get_t stashing_capabilities_get;
> >       /** Set list of multicast addresses */
> >       eth_set_mc_addr_list_t     set_mc_addr_list;
> >       mtu_set_t                  mtu_set;       /**< Set MTU */
>
> Since eth_dev_ops is visible in application binary, it is part of the ABI.
> Therefore it can not be changed until 25.11 release.

The layout of eth_dev_ops is not exposed to applications as it is in a
private header.
Could you clarify where you see a breakage for an application?


I see an ABI breakage for out of tree drivers though.
This could be avoided by moving the added ops to the end of the struct?
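
(To illustrate why placement matters; a generic sketch, not the real
struct:)

    struct ops {                /* old layout seen by an out-of-tree driver */
        int (*op_a)(void);      /* offset 0 */
        /* Inserting new members here shifts every member below it, so a
         * driver built against the old layout touches the wrong slots. */
        int (*op_z)(void);
        /* Appending new members after op_z instead keeps all existing
         * offsets intact, so old driver binaries keep working. */
    };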


-- 
David Marchand


^ permalink raw reply	[relevance 3%]

* Re: [RFC v3 2/2] ethdev: introduce the cache stashing hints API
  2024-12-05 15:40  3%       ` David Marchand
@ 2024-12-05 21:00  0%         ` Stephen Hemminger
  0 siblings, 0 replies; 169+ results
From: Stephen Hemminger @ 2024-12-05 21:00 UTC (permalink / raw)
  To: David Marchand
  Cc: Wathsala Vithanage, Thomas Monjalon, Ferruh Yigit,
	Andrew Rybchenko, dev, nd, Honnappa Nagarahalli, Dhruv Tripathi

[-- Attachment #1: Type: text/plain, Size: 6686 bytes --]

You're right, my test was crude: I just did a build and looked at the symbol
table of a statically linked binary.
I was confused because the pointer is exposed but not the data structure.

On Thu, Dec 5, 2024, 07:40 David Marchand <david.marchand@redhat.com> wrote:

> On Tue, Dec 3, 2024 at 10:13 PM Stephen Hemminger
> <stephen@networkplumber.org> wrote:
> >
> > On Mon, 21 Oct 2024 01:52:46 +0000
> > Wathsala Vithanage <wathsala.vithanage@arm.com> wrote:
> >
> > > Extend the ethdev library to enable the stashing of different data
> > > objects, such as the ones listed below, into CPU caches directly
> > > from the NIC.
> > >
> > > - Rx/Tx queue descriptors
> > > - Rx packets
> > > - Packet headers
> > > - packet payloads
> > > - Data of a packet at an offset from the start of the packet
> > >
> > > The APIs are designed in a hardware/vendor agnostic manner such that
> > > supporting PMDs could use any capabilities available in the underlying
> > > hardware for fine-grained stashing of data objects into a CPU cache
> > > (e.g., Steering Tags int PCIe TLP Processing Hints).
> > >
> > > The API provides an interface to query the availability of stashing
> > > capabilities, i.e., platform/NIC support, stashable object types, etc,
> > > via the rte_eth_dev_stashing_capabilities_get interface.
> > >
> > > The function pair rte_eth_dev_stashing_rx_config_set and
> > > rte_eth_dev_stashing_tx_config_set sets the stashing hint (the CPU,
> > > cache level, and data object types) on the Rx and Tx queues.
> > >
> > > PMDs that support stashing must register their implementations with the
> > > following eth_dev_ops callbacks, which are invoked by the ethdev
> > > functions listed above.
> > >
> > > - stashing_capabilities_get
> > > - stashing_rx_hints_set
> > > - stashing_tx_hints_set
> > >
> > > Signed-off-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
> > > Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > > Reviewed-by: Dhruv Tripathi <dhruv.tripathi@arm.com>
> > >
> > > ---
> > >  lib/ethdev/ethdev_driver.h |  66 +++++++++++++++
> > >  lib/ethdev/rte_ethdev.c    | 120 +++++++++++++++++++++++++++
> > >  lib/ethdev/rte_ethdev.h    | 161 +++++++++++++++++++++++++++++++++++++
> > >  lib/ethdev/version.map     |   4 +
> > >  4 files changed, 351 insertions(+)
> > >
> > > diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
> > > index 1fd4562b40..7caaea54a8 100644
> > > --- a/lib/ethdev/ethdev_driver.h
> > > +++ b/lib/ethdev/ethdev_driver.h
> > > @@ -1367,6 +1367,68 @@ enum rte_eth_dev_operation {
> > >  typedef uint64_t (*eth_get_restore_flags_t)(struct rte_eth_dev *dev,
> > >                                           enum rte_eth_dev_operation op);
> > >
> > > +/**
> > > + * @internal
> > > + * Set cache stashing hints in Rx queue.
> > > + *
> > > + * @param dev
> > > + *   Port (ethdev) handle.
> > > + * @param queue_id
> > > + *   Rx queue.
> > > + * @param config
> > > + *   Stashing hints configuration for the queue.
> > > + *
> > > + * @return
> > > + *   -ENOTSUP if the device or the platform does not support cache stashing.
> > > + *   -ENOSYS  if the underlying PMD hasn't implemented cache stashing feature.
> > > + *   -EINVAL  on invalid arguments.
> > > + *   0 on success.
> > > + */
> > > +typedef int (*eth_stashing_rx_hints_set_t)(struct rte_eth_dev *dev, uint16_t queue_id,
> > > +                                        struct rte_eth_stashing_config *config);
> > > +
> > > +/**
> > > + * @internal
> > > + * Set cache stashing hints in Tx queue.
> > > + *
> > > + * @param dev
> > > + *   Port (ethdev) handle.
> > > + * @param queue_id
> > > + *   Tx queue.
> > > + * @param config
> > > + *   Stashing hints configuration for the queue.
> > > + *
> > > + * @return
> > > + *   -ENOTSUP if the device or the platform does not support cache stashing.
> > > + *   -ENOSYS  if the underlying PMD hasn't implemented cache stashing feature.
> > > + *   -EINVAL  on invalid arguments.
> > > + *   0 on success.
> > > + */
> > > +typedef int (*eth_stashing_tx_hints_set_t)(struct rte_eth_dev *dev, uint16_t queue_id,
> > > +                                        struct rte_eth_stashing_config *config);
> > > +
> > > +/**
> > > + * @internal
> > > + * Get cache stashing object types supported in the ethernet device.
> > > + * The return value indicates availability of stashing hints support
> > > + * in the hardware and the PMD.
> > > + *
> > > + * @param dev
> > > + *   Port (ethdev) handle.
> > > + * @param objects
> > > + *   PMD sets supported bits on return.
> > > + *
> > > + * @return
> > > + *   -ENOTSUP if the device or the platform does not support cache stashing.
> > > + *   -ENOSYS  if the underlying PMD hasn't implemented cache stashing feature.
> > > + *   -EINVAL  on NULL values for types or hints parameters.
> > > + *   On return, types and hints parameters will have bits set for supported
> > > + *   object types and hints.
> > > + *   0 on success.
> > > + */
> > > +typedef int (*eth_stashing_capabilities_get_t)(struct rte_eth_dev *dev,
> > > +                                          uint16_t *objects);
> > > +
> > >  /**
> > >   * @internal A structure containing the functions exported by an Ethernet driver.
> > >   */
> > > @@ -1393,6 +1455,10 @@ struct eth_dev_ops {
> > >       eth_mac_addr_remove_t      mac_addr_remove; /**< Remove MAC address */
> > >       eth_mac_addr_add_t         mac_addr_add;  /**< Add a MAC address */
> > >       eth_mac_addr_set_t         mac_addr_set;  /**< Set a MAC address */
> > > +     eth_stashing_rx_hints_set_t   stashing_rx_hints_set; /**< Set Rx cache stashing*/
> > > +     eth_stashing_tx_hints_set_t   stashing_tx_hints_set; /**< Set Tx cache stashing*/
> > > +     /** Get supported stashing hints*/
> > > +     eth_stashing_capabilities_get_t stashing_capabilities_get;
> > >       /** Set list of multicast addresses */
> > >       eth_set_mc_addr_list_t     set_mc_addr_list;
> > >       mtu_set_t                  mtu_set;       /**< Set MTU */
> >
> > Since eth_dev_ops is visible in the application binary, it is part of the ABI.
> > Therefore it cannot be changed until the 25.11 release.
>
> The layout of eth_dev_ops is not exposed to applications as it is in a
> private header.
> Could you clarify where you see a breakage for an application?
>
>
> I see an ABI breakage for out-of-tree drivers though.
> This could be avoided by moving those added ops to the end of the struct?
>
>
> --
> David Marchand
>
>

[-- Attachment #2: Type: text/html, Size: 8692 bytes --]

^ permalink raw reply	[relevance 0%]

* RE: [PATCH v16 1/4] lib: add generic support for reading PMU events
  @ 2024-12-06 18:15  3%       ` Konstantin Ananyev
  2025-01-07  7:45  0%         ` Tomasz Duszynski
  0 siblings, 1 reply; 169+ results
From: Konstantin Ananyev @ 2024-12-06 18:15 UTC (permalink / raw)
  To: Tomasz Duszynski, Thomas Monjalon
  Cc: Ruifeng.Wang, bruce.richardson, david.marchand, dev, jerinj,
	konstantin.v.ananyev, mattias.ronnblom, mb, roretzla, stephen,
	zhoumin



> 
> Add support for programming PMU counters and reading their values
> at runtime, bypassing the kernel completely.
> 
> This is especially useful in cases where CPU cores are isolated,
> i.e. run dedicated tasks. In such cases one cannot use the standard
> perf utility without sacrificing latency and performance.
> 
> Signed-off-by: Tomasz Duszynski <tduszynski@marvell.com>
> ---

Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>

As possible future enhancements, I think it would be useful to
make the control-path API MT-safe, plus probably try to hide some of
the exposed internal structures (rte_pmu_event_group, etc.) inside the .c
(to minimize the surface for possible ABI breakage).
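
A sketch of that hiding technique (illustrative only, not the actual
rte_pmu layout):

    /* rte_pmu.h: applications only ever see an opaque handle */
    struct rte_pmu_event_group;

    /* rte_pmu.c: the definition stays private, so its layout can
     * change later without breaking the ABI */
    #include <stdbool.h>

    struct rte_pmu_event_group {
        int fds[16];      /* e.g. per-event perf fds */
        bool enabled;
    };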

> --
> 2.34.1


^ permalink raw reply	[relevance 3%]

* Re: [PATCH 0/3] Defer lcore variables allocation
  @ 2024-12-09 17:40  3%       ` David Marchand
  2024-12-10  9:41  0%         ` Mattias Rönnblom
  0 siblings, 1 reply; 169+ results
From: David Marchand @ 2024-12-09 17:40 UTC (permalink / raw)
  To: Mattias Rönnblom; +Cc: dev, thomas, frode.nordahl, mattias.ronnblom

On Mon, Dec 9, 2024 at 4:39 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
> On 2024-12-09 12:03, David Marchand wrote:
> > On Fri, Dec 6, 2024 at 12:02 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
> >> On 2024-12-05 18:57, David Marchand wrote:
> >>> As I had reported in rc2, the lcore variables allocation has a
> >>> noticeable impact on applications consuming DPDK, even when such
> >>> applications do not use DPDK, or use features associated with
> >>> some lcore variables.
> >>>
> >>> While the amount has been reduced in a rush before rc2,
> >>> there are still cases when the increased memory footprint is noticed
> >>> like in scaling tests.
> >>> See https://bugs.launchpad.net/ubuntu/+source/dpdk/+bug/2090931
> >>>
> >>
> >> What this bug report fails to mention is that it only affects
> >> applications using locked memory.
> >
> > - By locked memory, are you referring to mlock() and friends?
> > No ovsdb binary calls them, only the datapath cares about mlocking.
> >
> >
> > - At a minimum, I understand the lcore var change introduced an
> > increase in memory of 4kB * 128 (getpagesize() * RTE_MAX_LCORES),
> > since lcore_var_alloc() calls memset() of the lcore var size, for
> > every lcore.
> >
>
> Yes, that is my understanding. It's also consistent with the
> measurements I've posted on this list.
>
> > In this unit test where 1000 processes are kept alive in parallel,
> > this means memory consumption increased by 512k * 1000, so ~500M at
> > least.
> > This amount of memory is probably significant in a resource-restrained
> > env like a (Ubuntu) CI.
> >
> >
>
> I wouldn't expect thousands of concurrent processes in a
> resource-constrained system. Sounds wasteful indeed. But sure, there may
> well be scenarios where this makes sense.
>
> > - I went and traced this unit tests on my laptop by monitoring
> > kmem:mm_page_alloc, though there may be a better metrics when it comes
> > to memory consumption.
> >
> > # dir=build; perf stat -e kmem:mm_page_alloc -- tests/testsuite -C
> > $dir/tests AUTOTEST_PATH=$dir/utilities:$dir/vswitchd:$dir/ovsdb:$dir/vtep:$dir/tests:$dir/ipsec::
> > 2154
> >
> > Which gives:
> > - 1 635 489      kmem:mm_page_alloc for v23.11
> > - 5 777 043      kmem:mm_page_alloc for v24.11
> >
>
> Interesting. What is vm.overcommit_memory set to?

# cat /proc/sys/vm/overcommit_memory
0

And I am not sure what is being used in Ubuntu CI.

But the problem is, in the end, simpler.

[snip]

>
> > There is a 4M difference, where I would expect 128k.
> > So something more happens, than a simple page allocation per lcore,
> > though I fail to understand what.

Isolating the perf events for one process of this huge test, I counted
4878 page alloc calls.
Of them, 4108 had rte_lcore_var_alloc in their call stack, which
is unexpected.

After spending some time reading glibc, I noticed alloc_perturb().
*bingo*, I remembered that OVS unit tests are run with MALLOC_PERTURB_
(=165 after double checking OVS sources).

"""
Tunable: glibc.malloc.perturb

This tunable supersedes the MALLOC_PERTURB_ environment variable and
is identical in features.

If set to a non-zero value, memory blocks are initialized with values
depending on some low order bits of this tunable when they are
allocated (except when allocated by calloc) and freed. This can be
used to debug the use of uninitialized or freed heap memory. Note that
this option does not guarantee that the freed block will have any
specific values. It only guarantees that the content the block had
before it was freed will be overwritten.

The default value of this tunable is ‘0’.
"""

Now, reproducing this out of the test:

$ perf stat -e kmem:mm_page_alloc -- ./build/ovsdb/ovsdb-client --help
>/dev/null
 Performance counter stats for './build/ovsdb/ovsdb-client --help':
               810      kmem:mm_page_alloc
       0,003277941 seconds time elapsed
       0,003260000 seconds user
       0,000000000 seconds sys

$ MALLOC_PERTURB_=165 perf stat -e kmem:mm_page_alloc --
./build/ovsdb/ovsdb-client --help >/dev/null
 Performance counter stats for './build/ovsdb/ovsdb-client --help':
             4 789      kmem:mm_page_alloc
       0,008766171 seconds time elapsed
       0,000976000 seconds user
       0,007794000 seconds sys

So the issue is not triggered by mlock'd memory, but by the whole
buffer of 16M for lcore variables being touched by a glibc debugging
feature.

And in Ubuntu CI, it translated to requesting 16G.
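
A standalone way to see the same effect (my own minimal reproducer,
not from OVS):

    /* perturb_demo.c: a large allocation that is never written to.
     * Without MALLOC_PERTURB_, its pages stay virtual; with
     * MALLOC_PERTURB_=165, glibc's alloc_perturb() memsets the whole
     * block and every page gets faulted in. */
    #include <stdlib.h>

    int main(void)
    {
        char *buf = malloc(16 * 1024 * 1024); /* same order as lcore vars */

        if (buf == NULL)
            return 1;
        /* no write to buf: the mapping should stay mostly unbacked */
        free(buf);
        return 0;
    }

Compile it and compare "perf stat -e kmem:mm_page_alloc" with and
without MALLOC_PERTURB_=165 to reproduce the page alloc delta above.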

> >
> >
> > Btw, just focusing on lcore var, I did two more tests:
> > - 1 606 998      kmem:mm_page_alloc for v24.11 + revert all lcore var changes.
> > - 1 634 606      kmem:mm_page_alloc for v24.11 + current series with
> > postponed allocations.
> >
> >
>
> If one move initialization to shared object constructors (from having
> been at some later time), and then end up not running that
> initialization code at all (e.g., DPDK is not used), those code pages
> will increase RSS. That might well hurt more than the lcore variable
> memory itself, depending on how much code is run.
>
> However, such read-only pages can be replaced with something more useful
> if the system is under memory pressure, so they aren't really a big
> issue as far as (real) memory footprint is concerned.
>
> Just linking to DPDK (and its dependencies) already came with a 1-7 MB
> RSS penalty, prior to lcore variables. I wonder how much of that goes
> away if all RTE_INIT() type constructors are removed.

Regardless of the RSS change, removing constructors completely is not simple.
Postponing *all* existing constructors from DPDK code would be an ABI
breakage, as RTE_INIT has a priority notion and application
callbacks using RTE_INIT may rely on this.
Just deferring "unprioritised" constructors would be doable on paper,
but the location in rte_eal_init to which those are deferred would
have to be carefully evaluated (with -d plugins in mind).
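
For reference, the kind of ordering contract the RTE_INIT priorities
create (a minimal sketch using the existing macros):

    #include <rte_common.h>

    /* runs at "class" priority, i.e. before plain RTE_INIT() ctors */
    RTE_INIT_PRIO(app_early_setup, CLASS)
    {
        /* set up something later constructors depend on */
    }

    /* plain constructor, runs last among prioritised ones */
    RTE_INIT(app_setup)
    {
        /* may rely on app_early_setup() having already run */
    }

Moving only some of these into rte_eal_init() would silently change
that relative ordering.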


-- 
David Marchand


^ permalink raw reply	[relevance 3%]

* Re: [PATCH RESEND v7 2/5] ethdev: fix skip valid port in probing callback
  @ 2024-12-10  1:50  0%     ` lihuisong (C)
  2025-01-10  3:21  0%       ` lihuisong (C)
  0 siblings, 1 reply; 169+ results
From: lihuisong (C) @ 2024-12-10  1:50 UTC (permalink / raw)
  To: thomas, ferruh.yigit, Stephen Hemminger
  Cc: dev, fengchengwen, liuyonglong, andrew.rybchenko, Somnath Kotur,
	Ajit Khaparde, Dariusz Sosnowski, Suanming Mou, Matan Azrad,
	Ori Kam, Viacheslav Ovsiienko

Hi Ferruh, Stephen and Thomas,

Can you take a look at this patch? After all, it is an issue in the ethdev
layer.
This is also the outcome of what we discussed with Thomas and Ferruh before.
Please go back to this thread. If we don't need this patch, please let
me know and I will drop it from my upstreaming list.

/Huisong


On 2024/9/29 13:52, Huisong Li wrote:
> The event callback in an application may use the macro RTE_ETH_FOREACH_DEV to
> iterate over all enabled ports to do something (like verifying the port id
> validity) when receiving a probing event. If the ethdev state of a port is
> not RTE_ETH_DEV_UNUSED, this port will be considered a valid port.
>
> However, this state is set to RTE_ETH_DEV_ATTACHED after pushing the probing
> event. It means that the probing callback will skip this port. But this
> assignment cannot be moved before the probing notification. See
> commit be8cd210379a ("ethdev: fix port probing notification")
>
> So this patch has to add a new state, RTE_ETH_DEV_ALLOCATED. Set the ethdev
> state to RTE_ETH_DEV_ALLOCATED before pushing the probing event and set it to
> RTE_ETH_DEV_ATTACHED once definitely probed. And a port is valid if its
> device state is 'ALLOCATED' or 'ATTACHED'.
>
> In addition, the new state has to be placed after 'REMOVED' to avoid an ABI
> break. Fortunately, this ethdev state is internal and applications cannot
> access it directly. So this patch encapsulates an API, rte_eth_dev_is_used,
> for the ethdev layer or PMDs to call, eliminating concerns about comparing
> this state enum value directly.
>
> Fixes: be8cd210379a ("ethdev: fix port probing notification")
> Cc: stable@dpdk.org
>
> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> Acked-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
>   drivers/net/bnxt/bnxt_ethdev.c |  3 ++-
>   drivers/net/mlx5/mlx5.c        |  2 +-
>   lib/ethdev/ethdev_driver.c     | 13 ++++++++++---
>   lib/ethdev/ethdev_driver.h     | 12 ++++++++++++
>   lib/ethdev/ethdev_pci.h        |  2 +-
>   lib/ethdev/rte_class_eth.c     |  2 +-
>   lib/ethdev/rte_ethdev.c        |  4 ++--
>   lib/ethdev/rte_ethdev.h        |  4 +++-
>   lib/ethdev/version.map         |  1 +
>   9 files changed, 33 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
> index c6ad764813..7401dcd8b5 100644
> --- a/drivers/net/bnxt/bnxt_ethdev.c
> +++ b/drivers/net/bnxt/bnxt_ethdev.c
> @@ -6612,7 +6612,8 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
>   
>   	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
>   
> -	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
> +
> +	if (rte_eth_dev_is_used(eth_dev->state))
>   		bnxt_dev_close_op(eth_dev);
>   
>   	return 0;
> diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
> index 8d266b0e64..0df49e1f69 100644
> --- a/drivers/net/mlx5/mlx5.c
> +++ b/drivers/net/mlx5/mlx5.c
> @@ -3371,7 +3371,7 @@ mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
>   	while (port_id < RTE_MAX_ETHPORTS) {
>   		struct rte_eth_dev *dev = &rte_eth_devices[port_id];
>   
> -		if (dev->state != RTE_ETH_DEV_UNUSED &&
> +		if (rte_eth_dev_is_used(dev->state) &&
>   		    dev->device &&
>   		    (dev->device == odev ||
>   		     (dev->device->driver &&
> diff --git a/lib/ethdev/ethdev_driver.c b/lib/ethdev/ethdev_driver.c
> index c335a25a82..a87dbb00ff 100644
> --- a/lib/ethdev/ethdev_driver.c
> +++ b/lib/ethdev/ethdev_driver.c
> @@ -55,8 +55,8 @@ eth_dev_find_free_port(void)
>   	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
>   		/* Using shared name field to find a free port. */
>   		if (eth_dev_shared_data->data[i].name[0] == '\0') {
> -			RTE_ASSERT(rte_eth_devices[i].state ==
> -				   RTE_ETH_DEV_UNUSED);
> +			RTE_ASSERT(!rte_eth_dev_is_used(
> +					rte_eth_devices[i].state));
>   			return i;
>   		}
>   	}
> @@ -221,11 +221,18 @@ rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
>   	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
>   		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);
>   
> +	dev->state = RTE_ETH_DEV_ALLOCATED;
>   	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
>   
>   	dev->state = RTE_ETH_DEV_ATTACHED;
>   }
>   
> +bool rte_eth_dev_is_used(uint16_t dev_state)
> +{
> +	return dev_state == RTE_ETH_DEV_ALLOCATED ||
> +		dev_state == RTE_ETH_DEV_ATTACHED;
> +}
> +
>   int
>   rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
>   {
> @@ -243,7 +250,7 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
>   	if (ret != 0)
>   		return ret;
>   
> -	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
> +	if (rte_eth_dev_is_used(eth_dev->state))
>   		rte_eth_dev_callback_process(eth_dev,
>   				RTE_ETH_EVENT_DESTROY, NULL);
>   
> diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
> index abed4784aa..aa35b65848 100644
> --- a/lib/ethdev/ethdev_driver.h
> +++ b/lib/ethdev/ethdev_driver.h
> @@ -1704,6 +1704,18 @@ int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
>   __rte_internal
>   void rte_eth_dev_probing_finish(struct rte_eth_dev *dev);
>   
> +/**
> + * Check if a Ethernet device state is used or not
> + *
> + * @param dev_state
> + *   The state of the Ethernet device
> + * @return
> + *   - true if the state of the Ethernet device is allocated or attached
> + *   - false if this state is neither allocated nor attached
> + */
> +__rte_internal
> +bool rte_eth_dev_is_used(uint16_t dev_state);
> +
>   /**
>    * Create memzone for HW rings.
>    * malloc can't be used as the physical address is needed.
> diff --git a/lib/ethdev/ethdev_pci.h b/lib/ethdev/ethdev_pci.h
> index ec4f731270..05dec6716b 100644
> --- a/lib/ethdev/ethdev_pci.h
> +++ b/lib/ethdev/ethdev_pci.h
> @@ -179,7 +179,7 @@ rte_eth_dev_pci_generic_remove(struct rte_pci_device *pci_dev,
>   	 * eth device has been released.
>   	 */
>   	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
> -	    eth_dev->state == RTE_ETH_DEV_UNUSED)
> +	    !rte_eth_dev_is_used(eth_dev->state))
>   		return 0;
>   
>   	if (dev_uninit) {
> diff --git a/lib/ethdev/rte_class_eth.c b/lib/ethdev/rte_class_eth.c
> index b52f1dd9f2..81e70670d9 100644
> --- a/lib/ethdev/rte_class_eth.c
> +++ b/lib/ethdev/rte_class_eth.c
> @@ -118,7 +118,7 @@ eth_dev_match(const struct rte_eth_dev *edev,
>   	const struct rte_kvargs *kvlist = arg->kvlist;
>   	unsigned int pair;
>   
> -	if (edev->state == RTE_ETH_DEV_UNUSED)
> +	if (!rte_eth_dev_is_used(edev->state))
>   		return -1;
>   	if (arg->device != NULL && arg->device != edev->device)
>   		return -1;
> diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
> index a1f7efa913..4dc66abb7b 100644
> --- a/lib/ethdev/rte_ethdev.c
> +++ b/lib/ethdev/rte_ethdev.c
> @@ -349,7 +349,7 @@ uint16_t
>   rte_eth_find_next(uint16_t port_id)
>   {
>   	while (port_id < RTE_MAX_ETHPORTS &&
> -			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
> +	       !rte_eth_dev_is_used(rte_eth_devices[port_id].state))
>   		port_id++;
>   
>   	if (port_id >= RTE_MAX_ETHPORTS)
> @@ -408,7 +408,7 @@ rte_eth_dev_is_valid_port(uint16_t port_id)
>   	int is_valid;
>   
>   	if (port_id >= RTE_MAX_ETHPORTS ||
> -	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
> +	    !rte_eth_dev_is_used(rte_eth_devices[port_id].state))
>   		is_valid = 0;
>   	else
>   		is_valid = 1;
> diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
> index a9f92006da..9cc37e8cde 100644
> --- a/lib/ethdev/rte_ethdev.h
> +++ b/lib/ethdev/rte_ethdev.h
> @@ -2083,10 +2083,12 @@ typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
>   enum rte_eth_dev_state {
>   	/** Device is unused before being probed. */
>   	RTE_ETH_DEV_UNUSED = 0,
> -	/** Device is attached when allocated in probing. */
> +	/** Device is attached when definitely probed. */
>   	RTE_ETH_DEV_ATTACHED,
>   	/** Device is in removed state when plug-out is detected. */
>   	RTE_ETH_DEV_REMOVED,
> +	/** Device is allocated and is set before reporting new event. */
> +	RTE_ETH_DEV_ALLOCATED,
>   };
>   
>   struct rte_eth_dev_sriov {
> diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
> index f63dc32aa2..6ecf1ab89d 100644
> --- a/lib/ethdev/version.map
> +++ b/lib/ethdev/version.map
> @@ -349,6 +349,7 @@ INTERNAL {
>   	rte_eth_dev_get_by_name;
>   	rte_eth_dev_is_rx_hairpin_queue;
>   	rte_eth_dev_is_tx_hairpin_queue;
> +	rte_eth_dev_is_used;
>   	rte_eth_dev_probing_finish;
>   	rte_eth_dev_release_port;
>   	rte_eth_dev_internal_reset;

^ permalink raw reply	[relevance 0%]

* Re: [PATCH 0/3] Defer lcore variables allocation
  2024-12-09 17:40  3%       ` David Marchand
@ 2024-12-10  9:41  0%         ` Mattias Rönnblom
  0 siblings, 0 replies; 169+ results
From: Mattias Rönnblom @ 2024-12-10  9:41 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, thomas, frode.nordahl, mattias.ronnblom

On 2024-12-09 18:40, David Marchand wrote:
> On Mon, Dec 9, 2024 at 4:39 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>> On 2024-12-09 12:03, David Marchand wrote:
>>> On Fri, Dec 6, 2024 at 12:02 PM Mattias Rönnblom <hofors@lysator.liu.se> wrote:
>>>> On 2024-12-05 18:57, David Marchand wrote:
>>>>> As I had reported in rc2, the lcore variables allocation has a
>>>>> noticeable impact on applications consuming DPDK, even when such
>>>>> applications do not use DPDK, or use features associated with
>>>>> some lcore variables.
>>>>>
>>>>> While the amount has been reduced in a rush before rc2,
>>>>> there are still cases when the increased memory footprint is noticed
>>>>> like in scaling tests.
>>>>> See https://bugs.launchpad.net/ubuntu/+source/dpdk/+bug/2090931
>>>>>
>>>>
>>>> What this bug report fails to mention is that it only affects
>>>> applications using locked memory.
>>>
>>> - By locked memory, are you referring to mlock() and friends?
>>> No ovsdb binary calls them, only the datapath cares about mlocking.
>>>
>>>
>>> - At a minimum, I understand the lcore var change introduced an
>>> increase in memory of 4kB * 128 (getpagesize() * RTE_MAX_LCORES),
>>> since lcore_var_alloc() calls memset() of the lcore var size, for
>>> every lcore.
>>>
>>
>> Yes, that is my understanding. It's also consistent with the
>> measurements I've posted on this list.
>>
>>> In this unit test where 1000 processes are kept alive in parallel,
>>> this means memory consumption increased by 512k * 1000, so ~500M at
>>> least.
>>> This amount of memory is probably significant in a resource-restrained
>>> env like a (Ubuntu) CI.
>>>
>>>
>>
>> I wouldn't expect thousands of concurrent processes in a
>> resource-constrained system. Sounds wasteful indeed. But sure, there may
>> well be scenarios where this makes sense.
>>
>>> - I went and traced this unit tests on my laptop by monitoring
>>> kmem:mm_page_alloc, though there may be a better metrics when it comes
>>> to memory consumption.
>>>
>>> # dir=build; perf stat -e kmem:mm_page_alloc -- tests/testsuite -C
>>> $dir/tests AUTOTEST_PATH=$dir/utilities:$dir/vswitchd:$dir/ovsdb:$dir/vtep:$dir/tests:$dir/ipsec::
>>> 2154
>>>
>>> Which gives:
>>> - 1 635 489      kmem:mm_page_alloc for v23.11
>>> - 5 777 043      kmem:mm_page_alloc for v24.11
>>>
>>
>> Interesting. What is vm.overcommit_memory set to?
> 
> # cat /proc/sys/vm/overcommit_memory
> 0
> 
> And I am not sure what is being used in Ubuntu CI.
> 
> But the problem is, in the end, simpler.
> 
> [snip]
> 
>>
>>> There is a 4M difference, where I would expect 128k.
>>> So something more happens, than a simple page allocation per lcore,
>>> though I fail to understand what.
> 
> Isolating the perf events for one process of this huge test, I counted
> 4878 page alloc calls.
> Of them, 4108 had rte_lcore_var_alloc in their call stack, which
> is unexpected.
> 
> After spending some time reading glibc, I noticed alloc_perturb().
> *bingo*, I remembered that OVS unit tests are run with MALLOC_PERTURB_
> (=165 after double checking OVS sources).
> 
> """
> Tunable: glibc.malloc.perturb
> 
> This tunable supersedes the MALLOC_PERTURB_ environment variable and
> is identical in features.
> 
> If set to a non-zero value, memory blocks are initialized with values
> depending on some low order bits of this tunable when they are
> allocated (except when allocated by calloc) and freed. This can be
> used to debug the use of uninitialized or freed heap memory. Note that
> this option does not guarantee that the freed block will have any
> specific values. It only guarantees that the content the block had
> before it was freed will be overwritten.
> 
> The default value of this tunable is ‘0’.
> """
> 

OK, excellent work, detective. :)

Do you have a workaround for this issue, so that this test suite will work 
with vanilla DPDK 24.11? I guess OVS wants to keep the PERTURB settings.

The fix you've suggested will solve this issue for the no-DPDK-usage 
case. I'm guessing allocating the first lcore var block off of the BSS 
(e.g., via a static variable) would as well, in addition to solving 
similar cases but where there is "light" DPDK usage (i.e., 
rte_eal_init() is called, but with no real app).

> Now, reproducing this out of the test:
> 
> $ perf stat -e kmem:mm_page_alloc -- ./build/ovsdb/ovsdb-client --help >/dev/null
>   Performance counter stats for './build/ovsdb/ovsdb-client --help':
>                 810      kmem:mm_page_alloc
>         0,003277941 seconds time elapsed
>         0,003260000 seconds user
>         0,000000000 seconds sys
> 
> $ MALLOC_PERTURB_=165 perf stat -e kmem:mm_page_alloc --
> ./build/ovsdb/ovsdb-client --help >/dev/null
>   Performance counter stats for './build/ovsdb/ovsdb-client --help':
>               4 789      kmem:mm_page_alloc
>         0,008766171 seconds time elapsed
>         0,000976000 seconds user
>         0,007794000 seconds sys
> 
> So the issue is not triggered by mlock'd memory, but by the whole
> buffer of 16M for lcore variables being touched by a glibc debugging
> feature.
> 
> And in Ubuntu CI, it translated to requesting 16G.
> 
>>>
>>>
>>> Btw, just focusing on lcore var, I did two more tests:
>>> - 1 606 998      kmem:mm_page_alloc for v24.11 + revert all lcore var changes.
>>> - 1 634 606      kmem:mm_page_alloc for v24.11 + current series with
>>> postponed allocations.
>>>
>>>
>>
>> If one move initialization to shared object constructors (from having
>> been at some later time), and then end up not running that
>> initialization code at all (e.g., DPDK is not used), those code pages
>> will increase RSS. That might well hurt more than the lcore variable
>> memory itself, depending on how much code is run.
>>
>> However, such read-only pages can be replaced with something more useful
>> if the system is under memory pressure, so they aren't really a big
>> issue as far as (real) memory footprint is concerned.
>>
>> Just linking to DPDK (and its dependencies) already came with a 1-7 MB
>> RSS penalty, prior to lcore variables. I wonder how much of that goes
>> away if all RTE_INIT() type constructors are removed.
> 
> Regardless of the RSS change, removing constructors completely is not simple.
> Postponing *all* existing constructors from DPDK code would be an ABI
> breakage, as RTE_INIT has a priority notion and application
> callbacks using RTE_INIT may rely on this.

Agreed.

> Just deferring "unprioritised" constructors would be doable on paper,
> but the location in rte_eal_init to which those are deferred would
> have to be carefully evaluated (with -d plugins in mind).
> 
> 

It seems to me that a reworking of this area should have a bigger scope 
than just addressing this issue.

RTE_INIT() should probably be deprecated, and DPDK shouldn't encourage 
the use of shared-object level constructors.

For dynamically loaded modules (-d), there needs to be some kind of 
replacement, serving the same function.

There should probably be some way to hook into the initialization 
process (available also for apps), which should all happen at 
rte_eal_init() (or later).

Does the priority concept make sense? At least conceptually, the 
initialization should be based off a dependency graph (DAG).

You could reduce the priorities to a number of named stages (just like 
in FreeBSD or Linux). A minor tweak to the current model. However, in 
DPDK, it would be useful if a generic facility could be used by apps, 
and thus the number and names of the stages are open ended (unlike the 
UNIX kernels').

You could rely on explicit initialization alone, where each module 
initializes its dependencies. That would lead to repeated init function 
calls on the same module, unless there's some init framework help from 
EAL to prevent that. Overall, that would lead to more code, where 
various higher-level modules need to initialize many dependencies; a
sketch of the guard such a framework could provide follows.
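
A minimal sketch of that framework help (names are made up):

    #include <stdbool.h>

    static bool mod_a_done;

    int mod_a_init(void)
    {
        if (mod_a_done)
            return 0;   /* repeated calls from dependents are no-ops */
        /* ... init mod_a's own dependencies first, then mod_a ... */
        mod_a_done = true;
        return 0;
    }

(With the usual caveat that a real version would need to be MT-safe,
e.g. built on a pthread_once-style primitive.)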

Maybe the DAG is available on the build (meson) level, and thus the code 
can be generated out of that?

Some random thoughts.


^ permalink raw reply	[relevance 0%]

* [PATCH v5] graph: mcore: optimize graph search
  2024-11-14  8:45  5%     ` [PATCH v4 1/2] graph: mcore: optimize graph search Huichao Cai
  2024-11-14  8:45  5%       ` [PATCH v4 2/2] graph: add alignment to the member of rte_node Huichao Cai
@ 2024-12-13  2:21 10%       ` Huichao Cai
  2024-12-13 14:36  3%         ` David Marchand
  2024-12-16  1:43 11%         ` [PATCH v6] " Huichao Cai
  1 sibling, 2 replies; 169+ results
From: Huichao Cai @ 2024-12-13  2:21 UTC (permalink / raw)
  To: jerinj, kirankumark, ndabilpuram, yanzhirun_163; +Cc: dev

The function __rte_graph_mcore_dispatch_sched_node_enqueue uses a
slow loop to search for the graph. Modify the search logic to record
the result of the first search and use this record for subsequent
searches, improving search speed.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 doc/guides/rel_notes/release_25_03.rst     |  2 ++
 lib/graph/rte_graph_model_mcore_dispatch.c | 11 +++++++----
 lib/graph/rte_graph_worker_common.h        |  1 +
 3 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/doc/guides/rel_notes/release_25_03.rst b/doc/guides/rel_notes/release_25_03.rst
index 426dfcd982..55ffe8170d 100644
--- a/doc/guides/rel_notes/release_25_03.rst
+++ b/doc/guides/rel_notes/release_25_03.rst
@@ -102,6 +102,8 @@ ABI Changes
 
 * No ABI change that would break compatibility with 24.11.
 
+* graph: Added ``graph`` field to the ``dispatch`` structure in the ``rte_node`` structure.
+
 
 Known Issues
 ------------
diff --git a/lib/graph/rte_graph_model_mcore_dispatch.c b/lib/graph/rte_graph_model_mcore_dispatch.c
index a590fc9497..a81d338227 100644
--- a/lib/graph/rte_graph_model_mcore_dispatch.c
+++ b/lib/graph/rte_graph_model_mcore_dispatch.c
@@ -118,11 +118,14 @@ __rte_graph_mcore_dispatch_sched_node_enqueue(struct rte_node *node,
 					      struct rte_graph_rq_head *rq)
 {
 	const unsigned int lcore_id = node->dispatch.lcore_id;
-	struct rte_graph *graph;
+	struct rte_graph *graph = node->dispatch.graph;
 
-	SLIST_FOREACH(graph, rq, next)
-		if (graph->dispatch.lcore_id == lcore_id)
-			break;
+	if (unlikely((!graph) || (graph->dispatch.lcore_id != lcore_id))) {
+		SLIST_FOREACH(graph, rq, next)
+			if (graph->dispatch.lcore_id == lcore_id)
+				break;
+		node->dispatch.graph = graph;
+	}
 
 	return graph != NULL ? __graph_sched_node_enqueue(node, graph) : false;
 }
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index d3ec88519d..aef0f65673 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -110,6 +110,7 @@ struct __rte_cache_aligned rte_node {
 			unsigned int lcore_id;  /**< Node running lcore. */
 			uint64_t total_sched_objs; /**< Number of objects scheduled. */
 			uint64_t total_sched_fail; /**< Number of scheduled failure. */
+			struct rte_graph *graph;  /**< Graph corresponding to lcore_id. */
 		} dispatch;
 	};
 
-- 
2.27.0


^ permalink raw reply	[relevance 10%]

* Re: [PATCH v5] graph: mcore: optimize graph search
  2024-12-13  2:21 10%       ` [PATCH v5] graph: mcore: optimize graph search Huichao Cai
@ 2024-12-13 14:36  3%         ` David Marchand
  2024-12-16  1:43 11%         ` [PATCH v6] " Huichao Cai
  1 sibling, 0 replies; 169+ results
From: David Marchand @ 2024-12-13 14:36 UTC (permalink / raw)
  To: Huichao Cai
  Cc: jerinj, kirankumark, ndabilpuram, yanzhirun_163, dev, Thomas Monjalon

On Fri, Dec 13, 2024 at 3:22 AM Huichao Cai <chcchc88@163.com> wrote:
> diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
> index d3ec88519d..aef0f65673 100644
> --- a/lib/graph/rte_graph_worker_common.h
> +++ b/lib/graph/rte_graph_worker_common.h
> @@ -110,6 +110,7 @@ struct __rte_cache_aligned rte_node {
>                         unsigned int lcore_id;  /**< Node running lcore. */
>                         uint64_t total_sched_objs; /**< Number of objects scheduled. */
>                         uint64_t total_sched_fail; /**< Number of scheduled failure. */
> +                       struct rte_graph *graph;  /**< Graph corresponding to lcore_id. */
>                 } dispatch;
>         };

The rte_node struct size is not changed with this patch.
In v24.11, rte_node objects are populated/allocated in
graph_nodes_populate, which zeroes the whole rte_node.
So this change looks safe from an ABI compat with v24.11 pov.

However, we need to waive the warning from libabigail:
http://mails.dpdk.org/archives/test-report/2024-December/834167.html

Please add a temporary exception in devtools/libabigail.abignore.

It should be something like:
[suppress_type]
       name = rte_node
       has_size_change = no
       has_data_member_inserted_between = {offset_of(total_sched_fail), offset_of(xstat_off)}


-- 
David Marchand


^ permalink raw reply	[relevance 3%]

* [PATCH v6] graph: mcore: optimize graph search
  2024-12-13  2:21 10%       ` [PATCH v5] graph: mcore: optimize graph search Huichao Cai
  2024-12-13 14:36  3%         ` David Marchand
@ 2024-12-16  1:43 11%         ` Huichao Cai
  2024-12-16 14:49  4%           ` David Marchand
  1 sibling, 1 reply; 169+ results
From: Huichao Cai @ 2024-12-16  1:43 UTC (permalink / raw)
  Cc: dev, jerinj, kirankumark, ndabilpuram, yanzhirun_163

The function __rte_graph_mcore_dispatch_sched_node_enqueue uses a
slow loop to search for the graph. Modify the search logic to record
the result of the first search and use this record for subsequent
searches, improving search speed.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 devtools/libabigail.abignore               |  5 +++++
 doc/guides/rel_notes/release_25_03.rst     |  2 ++
 lib/graph/rte_graph_model_mcore_dispatch.c | 11 +++++++----
 lib/graph/rte_graph_worker_common.h        |  1 +
 4 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/devtools/libabigail.abignore b/devtools/libabigail.abignore
index 21b8cd6113..a92ee29512 100644
--- a/devtools/libabigail.abignore
+++ b/devtools/libabigail.abignore
@@ -33,3 +33,8 @@
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Temporary exceptions till next major ABI version ;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+[suppress_type]
+       name = rte_node
+       has_size_change = no
+       has_data_member_inserted_between =
+{offset_of(total_sched_fail), offset_of(xstat_off)}
diff --git a/doc/guides/rel_notes/release_25_03.rst b/doc/guides/rel_notes/release_25_03.rst
index 426dfcd982..55ffe8170d 100644
--- a/doc/guides/rel_notes/release_25_03.rst
+++ b/doc/guides/rel_notes/release_25_03.rst
@@ -102,6 +102,8 @@ ABI Changes
 
 * No ABI change that would break compatibility with 24.11.
 
+* graph: Added ``graph`` field to the ``dispatch`` structure in the ``rte_node`` structure.
+
 
 Known Issues
 ------------
diff --git a/lib/graph/rte_graph_model_mcore_dispatch.c b/lib/graph/rte_graph_model_mcore_dispatch.c
index a590fc9497..a81d338227 100644
--- a/lib/graph/rte_graph_model_mcore_dispatch.c
+++ b/lib/graph/rte_graph_model_mcore_dispatch.c
@@ -118,11 +118,14 @@ __rte_graph_mcore_dispatch_sched_node_enqueue(struct rte_node *node,
 					      struct rte_graph_rq_head *rq)
 {
 	const unsigned int lcore_id = node->dispatch.lcore_id;
-	struct rte_graph *graph;
+	struct rte_graph *graph = node->dispatch.graph;
 
-	SLIST_FOREACH(graph, rq, next)
-		if (graph->dispatch.lcore_id == lcore_id)
-			break;
+	if (unlikely((!graph) || (graph->dispatch.lcore_id != lcore_id))) {
+		SLIST_FOREACH(graph, rq, next)
+			if (graph->dispatch.lcore_id == lcore_id)
+				break;
+		node->dispatch.graph = graph;
+	}
 
 	return graph != NULL ? __graph_sched_node_enqueue(node, graph) : false;
 }
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index d3ec88519d..aef0f65673 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -110,6 +110,7 @@ struct __rte_cache_aligned rte_node {
 			unsigned int lcore_id;  /**< Node running lcore. */
 			uint64_t total_sched_objs; /**< Number of objects scheduled. */
 			uint64_t total_sched_fail; /**< Number of scheduled failure. */
+			struct rte_graph *graph;  /**< Graph corresponding to lcore_id. */
 		} dispatch;
 	};
 
-- 
2.27.0


^ permalink raw reply	[relevance 11%]

* DTS WG Meeting Minutes - December 5, 2024
@ 2024-12-16  4:14  4% Patrick Robb
  0 siblings, 0 replies; 169+ results
From: Patrick Robb @ 2024-12-16  4:14 UTC (permalink / raw)
  To: dev; +Cc: ci, dts

[-- Attachment #1: Type: text/plain, Size: 1807 bytes --]

#####################################################################
December 5, 2024
Attendees
* Patrick Robb
* Paul Szczepanek
* Luca Vizzarro

#####################################################################
Minutes

=====================================================================
General Discussion
* CI Testing labs discussion:
   * ABI testing can begin again
      * UNH is rebuilding its container images with the new v25 ABI
reference.
   * Still debugging some issues with cryptodev device creation on the
Marvell CN10K device. Going to rebuild the Marvell SDK with version 12 and
reflash the board.
   * AMD donated servers have been rack mounted, provisioned with ubuntu
24.04 and DTS/DPDK dependencies.
   * ARM Grace server delivery date is 12/23
* Patrick and Aaron had a call with AWS about setting up a CI “Lab” for AWS
which would do per-patch testing for the test-report mailing list
* xSightLabs got a DTS demo from Patrick - they are using both legacy DTS
and new DTS in parallel
* December 26 CI meeting is cancelled
* January 2 DTS meeting is cancelled
* DPDK 24.11 has been released
* 25.03 roadmap status:
https://docs.google.com/document/d/1doTZOOpkv4D5P2w6K7fEJpa_CjzrlMl3mCeDBWtxnko/edit

=====================================================================
Patch discussions
* Ruff:
   * The default rules are too minimal, but we don’t need to use literally
every rule. Luca will look for a recommended set of rules to use online
* Flow rule dataclass v5 series is submitted
* Bugzilla discussions

=====================================================================
Any other business
* Patrick will invite the Microsoft Azure testers to the DTS meetings
   * mamcgove@microsoft.com
* Next meeting Dec 19, 2024

[-- Attachment #2: Type: text/html, Size: 2050 bytes --]

^ permalink raw reply	[relevance 4%]

* Community CI Meeting Minutes - December 12, 2024
@ 2024-12-16  4:18  3% Patrick Robb
  0 siblings, 0 replies; 169+ results
From: Patrick Robb @ 2024-12-16  4:18 UTC (permalink / raw)
  To: ci; +Cc: dev, dts

[-- Attachment #1: Type: text/plain, Size: 3479 bytes --]

#####################################################################
December 12, 2024
Attendees
1. Patrick Robb
2. Aaron Conole
3. Paul Szczepanek
4. Luca Vizzarro

#####################################################################
Minutes

=====================================================================
General Announcements
* DTS roadmap:
https://docs.google.com/document/d/1doTZOOpkv4D5P2w6K7fEJpa_CjzrlMl3mCeDBWtxnko/edit?tab=t.0
* AWS:
   * They have confirmed that AWS ARM Graviton systems will be included in
CI testing
* Aaron: It would be ideal if we could update some of the scripts in
dpdk-ci repo such that the systems being brought over across labs are more
uniform
   * Polling patchwork is one example of a script requiring an update

=====================================================================
CI Status

---------------------------------------------------------------------
UNH-IOL Community Lab
* ABI Testing: UNH is building new container images now.
   * Libabigail 2.6 should be used going forward, which comes with a new
dependency “libxxhash”
* Marvell SDK 12: We are still having trouble setting up the Marvell crypto
devices, so we are upgrading to the latest SDK and reflashing the board.
* Maintenance:
   * Migrating and upgrading our Jenkins instance next Monday at 15:00 UTC
- downtime expected to be a few hours.
* Working on turning on new DTS in CI testing currently.
* “Retest Button” for periodic testing is in code review now, expected to
be available on the DPDK Dashboard next week.
* Dpdk-ci repo:
   * Need a review and merge of the create_series_artifact.py patch which
adds the meson check script.

---------------------------------------------------------------------
Intel Lab
* None

---------------------------------------------------------------------
Github Actions
* None

---------------------------------------------------------------------
Loongarch Lab
* None

=====================================================================
DTS Improvements & Test Development
* Patrick will do a review for the Ruff patch
* Paramiko:
   * The bug has been resolved by Nick by adding a while loop in which we
wait for the expected prompt to enter the Paramiko buffer.
   * There is another Paramiko issue in which a file read/write
error bubbles up at the conclusion of the DTS execution (it may be a race
condition). This does not affect the test run, but it pollutes the logs, so
we should investigate this during the 25.03 cycle.
* Pending patches from the previous release: Work to review and merge Ruff
very quickly, then rebase all the old patches (in groups) and quickly merge
those groups.
   * Will rebase and review 1 old patchseries per developer per week in
December/January, and we should be
* Scapy/MyPy updates:
   * The new Scapy version includes better type support - we will update
within the 25.03 cycle.
* Poetry.lock file is committed to the repo (it is not included in the
.gitignore). After some discussion we have confirmed that this is correct,
and that maintainers should periodically update the poetry.lock file in the
remote repo.
   * Lock file makes dependency resolution faster
   * Lock file provides a universal lock of dependencies across python
versions, across time

=====================================================================
Any other business
* Next Meeting Jan 16, 2024

[-- Attachment #2: Type: text/html, Size: 3772 bytes --]

^ permalink raw reply	[relevance 3%]

* Re: [PATCH v6] graph: mcore: optimize graph search
  2024-12-16  1:43 11%         ` [PATCH v6] " Huichao Cai
@ 2024-12-16 14:49  4%           ` David Marchand
  2024-12-17  9:04  0%             ` David Marchand
  0 siblings, 1 reply; 169+ results
From: David Marchand @ 2024-12-16 14:49 UTC (permalink / raw)
  To: Dodji Seketeli
  Cc: dev, jerinj, kirankumark, ndabilpuram, yanzhirun_163, Huichao Cai

Salut Dodji,

On Mon, Dec 16, 2024 at 2:44 AM Huichao Cai <chcchc88@163.com> wrote:
> diff --git a/devtools/libabigail.abignore b/devtools/libabigail.abignore
> index 21b8cd6113..a92ee29512 100644
> --- a/devtools/libabigail.abignore
> +++ b/devtools/libabigail.abignore
> @@ -33,3 +33,8 @@
>  ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
>  ; Temporary exceptions till next major ABI version ;
>  ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
> +[suppress_type]
> +       name = rte_node
> +       has_size_change = no
> +       has_data_member_inserted_between =
> +{offset_of(total_sched_fail), offset_of(xstat_off)}

Here is the suppression rule I suggested, but it does not have the intended effect.

For the context:

Before the change (that you can find below with the next hunk), we
made sure to zero the whole rte_node object at runtime in the library
allocator.
And the offset of the field next to 'dispatch' is fixed with an
explicit alignas() statement.

        /** Fast schedule area for mcore dispatch model. */
        union {
                alignas(RTE_CACHE_LINE_MIN_SIZE) struct {
                        unsigned int lcore_id;  /**< Node running lcore. */
                        uint64_t total_sched_objs; /**< Number of objects scheduled. */
                        uint64_t total_sched_fail; /**< Number of scheduled failure. */
                } dispatch;
        };

        /** Fast path area cache line 1. */
        alignas(RTE_CACHE_LINE_MIN_SIZE)
        rte_graph_off_t xstat_off; /**< Offset to xstat counters. */

If you want the whole definition, you can have a look at:
https://git.dpdk.org/dpdk/tree/lib/graph/rte_graph_worker_common.h#n87

[...]

> diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
> index d3ec88519d..aef0f65673 100644
> --- a/lib/graph/rte_graph_worker_common.h
> +++ b/lib/graph/rte_graph_worker_common.h
> @@ -110,6 +110,7 @@ struct __rte_cache_aligned rte_node {
>                         unsigned int lcore_id;  /**< Node running lcore. */
>                         uint64_t total_sched_objs; /**< Number of objects scheduled. */
>                         uint64_t total_sched_fail; /**< Number of scheduled failure. */
> +                       struct rte_graph *graph;  /**< Graph corresponding to lcore_id. */
>                 } dispatch;
>         };

Now, the patch adds a new field in the struct {} dispatch field.

Here is what abidiff reports:

$ abidiff --version
abidiff: 2.6.0

$ abidiff --suppr
/home/dmarchan/git/pub/dpdk.org/main/devtools/libabigail.abignore
--no-added-syms --headers-dir1
/home/dmarchan/abi/v24.11/build-gcc-shared/usr/local/include
--headers-dir2 /home/dmarchan/builds/main/build-gcc-shared/install/usr/local/include
/home/dmarchan/abi/v24.11/build-gcc-shared/usr/local/lib64/librte_graph.so.25.0
/home/dmarchan/builds/main/build-gcc-shared/install/usr/local/lib64/librte_graph.so.25.1
Functions changes summary: 0 Removed, 1 Changed (9 filtered out), 0 Added functions
Variables changes summary: 0 Removed, 0 Changed, 0 Added variable

1 function with some indirect sub-type change:

  [C] 'function bool __rte_graph_mcore_dispatch_sched_node_enqueue(rte_node*, rte_graph_rq_head*)' at rte_graph_model_mcore_dispatch.c:117:1 has some indirect sub-type changes:
    parameter 1 of type 'rte_node*' has sub-type changes:
      in pointed to type 'struct rte_node' at rte_graph_worker_common.h:92:1:
        type size hasn't changed
        1 data member changes (2 filtered):
          anonymous data member at offset 1536 (in bits) changed from:
            union {struct {unsigned int lcore_id; uint64_t total_sched_objs; uint64_t total_sched_fail;} dispatch;}
          to:
            union {struct {unsigned int lcore_id; uint64_t total_sched_objs; uint64_t total_sched_fail; rte_graph* graph;} dispatch;}


What would be the best way to suppress this warning?
I tried the following which seems to work, but I prefer to ask for your advice.

[suppress_type]
    name = rte_node
    has_data_member_at = offset_of(total_sched_fail)


Thanks.


-- 
David Marchand


^ permalink raw reply	[relevance 4%]

* Re: [PATCH v6] graph: mcore: optimize graph search
  2024-12-16 14:49  4%           ` David Marchand
@ 2024-12-17  9:04  0%             ` David Marchand
  0 siblings, 0 replies; 169+ results
From: David Marchand @ 2024-12-17  9:04 UTC (permalink / raw)
  To: Dodji Seketeli
  Cc: dev, jerinj, kirankumark, ndabilpuram, yanzhirun_163, Huichao Cai

On Mon, Dec 16, 2024 at 3:49 PM David Marchand
<david.marchand@redhat.com> wrote:
> $ abidiff --suppr
> /home/dmarchan/git/pub/dpdk.org/main/devtools/libabigail.abignore
> --no-added-syms --headers-dir1
> /home/dmarchan/abi/v24.11/build-gcc-shared/usr/local/include
> --headers-dir2 /home/dmarchan/builds/main/build-gcc-shared/install/usr/local/include
> /home/dmarchan/abi/v24.11/build-gcc-shared/usr/local/lib64/librte_graph.so.25.0
> /home/dmarchan/builds/main/build-gcc-shared/install/usr/local/lib64/librte_graph.so.25.1
> Functions changes summary: 0 Removed, 1 Changed (9 filtered out), 0 Added functions
> Variables changes summary: 0 Removed, 0 Changed, 0 Added variable
>
> 1 function with some indirect sub-type change:
>
>   [C] 'function bool __rte_graph_mcore_dispatch_sched_node_enqueue(rte_node*, rte_graph_rq_head*)' at rte_graph_model_mcore_dispatch.c:117:1 has some indirect sub-type changes:
>     parameter 1 of type 'rte_node*' has sub-type changes:
>       in pointed to type 'struct rte_node' at rte_graph_worker_common.h:92:1:
>         type size hasn't changed
>         1 data member changes (2 filtered):
>           anonymous data member at offset 1536 (in bits) changed from:
>             union {struct {unsigned int lcore_id; uint64_t total_sched_objs; uint64_t total_sched_fail;} dispatch;}
>           to:
>             union {struct {unsigned int lcore_id; uint64_t total_sched_objs; uint64_t total_sched_fail; rte_graph* graph;} dispatch;}
>
>
> What would be the best way to suppress this warning?
> I tried the following which seems to work, but I prefer to ask for your advice.
>
> [suppress_type]
>     name = rte_node
>     has_data_member_at = offset_of(total_sched_fail)

Gah.. I meant has_data_member_inserted_at.
But then testing with has_data_member_inserted_at, the warning is not
suppressed either...

Any help appreciated.


-- 
David Marchand


^ permalink raw reply	[relevance 0%]

* Re: rte_event_eth_tx_adapter_enqueue() short enqueue
  @ 2024-12-19 17:12  3%         ` Bruce Richardson
  0 siblings, 0 replies; 169+ results
From: Bruce Richardson @ 2024-12-19 17:12 UTC (permalink / raw)
  To: Morten Brørup
  Cc: Mattias Rönnblom, dev, Jerin Jacob Kollanukkaran,
	Daniel Östman, Naga Harish K S V, nils.wiberg, gyumin.hwang,
	changshik.lee, Mattias Rönnblom

On Thu, Dec 19, 2024 at 04:59:33PM +0100, Morten Brørup wrote:
> > From: Bruce Richardson [mailto:bruce.richardson@intel.com]
> > Sent: Wednesday, 27 November 2024 12.07
> > 
> > On Wed, Nov 27, 2024 at 11:53:50AM +0100, Mattias Rönnblom wrote:
> > > On 2024-11-27 11:38, Bruce Richardson wrote:
> > > > On Wed, Nov 27, 2024 at 11:03:31AM +0100, Mattias Rönnblom wrote:
> > > > > Hi.
> > > > >
> > > > > Consider the following situation:
> > > > >
> > > > > An application does
> > > > >
> > > > > rte_event_eth_tx_adapter_enqueue()
> > > > >
> > > > > and due to back-pressure or some other reason not all events/packets
> > > > > could be enqueued, and a count lower than the nb_events input
> > > > > parameter is returned.
> > > > >
> > > > > The API says that "/../ the remaining events at the end of ev[] are
> > > > > not consumed and the caller has to take care of them /../".
> > > > >
> > > > > May an event device rearrange the ev array so that any enqueue
> > > > > failures are put last in the ev array?
> > > > >
> > > > > In other words: does the "at the end of ev[]" mean "at the end of
> > > > > ev[] as the call has completed", or is the event array supposed to be
> > > > > untouched, and thus the same events are at the end both before and
> > > > > after the call.
> > > > >
> > > > > The ev array pointer is not const, so from that perspective it may be
> > > > > modified.
> > > > >
> > > > > This situation may occur for example when the bonding driver is used
> > > > > under the hood. The bonding driver does this kind of rearrangement on
> > > > > the ethdev level.
> > > > >
> > > >
> > > > Interesting question. I tend to think that we should not preclude this
> > > > reordering, as it should allow e.g. an eventdev which is short on space
> > > > to selectively enqueue only the high-priority events.
> > > >
> > >
> > > Allowing reordering may be a little surprising to the user. At least it
> > > would be for me.
> > >
> > > Other eventdev enqueue APIs do not allow this kind of reordering (with
> > > const-marked arrays).
> > >
> > 
> > That is a good point. I forgot that the events are directly passed to the
> > enqueue functions rather than being passed as pointers, which could then
> > be reordered without modifying the underlying events.
> > 
> > > That said, I lean toward agreeing with you, since it will solve the
> > > ethdev tx_burst() mapping issue mentioned.
> > >
> > 
> > If enabling this solves a real problem, then let's allow it, despite the
> > inconsistency in the APIs. Again, though, we need to call this out in the
> > docs very prominently to avoid surprises.
> > 
> > Alternatively, do we want to add a separate API that explicitly allows
> > reordering, and update the existing API to have a const value parameter?
> > For drivers that don't implement the reordering they can just not provide
> > the reordering function and the non-reorder version can be used
> > transparently instead.
> 
> IMHO, allowing reordering with the current API would break the developer's reasonable expectations of the API.
> Breaking reasonable expectations could be considered an API break.
> 
> Some application may have a parallel array with metadata about the events.
> If the events are reordered (and the last N of them deferred to the application to process), the application can no longer index into the metadata array (to process the metadata of the deferred events).
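> 
> A minimal sketch of that pattern (hypothetical helper names):
> 
>     struct app_meta meta[BURST];  /* meta[i] describes ev[i] */
>     uint16_t sent = rte_event_eth_tx_adapter_enqueue(dev_id, port_id, ev, nb, 0);
> 
>     for (uint16_t i = sent; i < nb; i++)
>         handle_deferred(&ev[i], &meta[i]);  /* breaks if ev[] was reordered */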
> 
> For reference, consider the SORING proposed by Konstantin.
> 
> Regarding "const":
> It's my impression that "const" is missing in lots of APIs where the parameter must not be modified.

+1 to this.
Fortunately, if we find ones where it is missing, it's not an ABI or API
break to add in the missing const clarification. Therefore, let's add these
consts in whenever we spot them missing!

> So, developers cannot rely on "const" as an indication of whether a passed parameter might be modified.
> Obviously, a "const" parameter cannot be modified. But no "const" does not imply that the parameter is contractually modifiable by the function.
> 

Or more specifically, the developer cannot derive any information from the
absence of const in our APIs - the parameters might be modified, but then
again they may not.

Sometimes this causes problems, where application code wants to have
const-correctness but is blocked by a DPDK function that logically
should not modify its parameters, e.g. a configure function, but does
not explicitly commit to that via const.
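
As a small sketch of that friction (hypothetical prototypes, not real
DPDK ones):

    struct dev_conf { int mtu; };

    /* as often declared today: no const, so no contract */
    static int dev_configure(struct dev_conf *conf) { (void)conf; return 0; }

    /* what it logically could be */
    static int dev_configure_const(const struct dev_conf *conf) { (void)conf; return 0; }

    void app_init(void)
    {
        static const struct dev_conf conf = { .mtu = 1500 };

        dev_configure_const(&conf);              /* fine */
        dev_configure((struct dev_conf *)&conf); /* has to cast const away */
    }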

/Bruce

^ permalink raw reply	[relevance 3%]

* [v1 15/16] crypto/virtio: add vhost backend to virtio_user
  @ 2024-12-24  7:37  1% ` Gowrishankar Muthukrishnan
  0 siblings, 0 replies; 169+ results
From: Gowrishankar Muthukrishnan @ 2024-12-24  7:37 UTC (permalink / raw)
  To: dev, Akhil Goyal, Maxime Coquelin, Chenbo Xia, Fan Zhang, Jay Zhou
  Cc: jerinj, anoobj, Rajesh Mudimadugula, Gowrishankar Muthukrishnan

Add vhost backend to virtio_user crypto.

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
 drivers/crypto/virtio/meson.build             |   7 +
 drivers/crypto/virtio/virtio_cryptodev.c      |  57 +-
 drivers/crypto/virtio/virtio_cryptodev.h      |   3 +
 drivers/crypto/virtio/virtio_pci.h            |   7 +
 drivers/crypto/virtio/virtio_ring.h           |   6 -
 .../crypto/virtio/virtio_user/vhost_vdpa.c    | 310 +++++++
 .../virtio/virtio_user/virtio_user_dev.c      | 774 ++++++++++++++++++
 .../virtio/virtio_user/virtio_user_dev.h      |  88 ++
 drivers/crypto/virtio/virtio_user_cryptodev.c | 586 +++++++++++++
 9 files changed, 1810 insertions(+), 28 deletions(-)
 create mode 100644 drivers/crypto/virtio/virtio_user/vhost_vdpa.c
 create mode 100644 drivers/crypto/virtio/virtio_user/virtio_user_dev.c
 create mode 100644 drivers/crypto/virtio/virtio_user/virtio_user_dev.h
 create mode 100644 drivers/crypto/virtio/virtio_user_cryptodev.c

diff --git a/drivers/crypto/virtio/meson.build b/drivers/crypto/virtio/meson.build
index a4954a094b..a178a61487 100644
--- a/drivers/crypto/virtio/meson.build
+++ b/drivers/crypto/virtio/meson.build
@@ -17,3 +17,10 @@ sources = files(
         'virtio_rxtx.c',
         'virtqueue.c',
 )
+
+if is_linux
+    sources += files('virtio_user_cryptodev.c',
+        'virtio_user/vhost_vdpa.c',
+        'virtio_user/virtio_user_dev.c')
+    deps += ['bus_vdev', 'common_virtio']
+endif
diff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c
index 159e96f7db..e9e65366fe 100644
--- a/drivers/crypto/virtio/virtio_cryptodev.c
+++ b/drivers/crypto/virtio/virtio_cryptodev.c
@@ -544,24 +544,12 @@ virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
 	return 0;
 }
 
-/*
- * This function is based on probe() function
- * It returns 0 on success.
- */
-static int
-crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
-		struct rte_cryptodev_pmd_init_params *init_params)
+int
+crypto_virtio_dev_init(struct rte_cryptodev *cryptodev, uint64_t features,
+		struct rte_pci_device *pci_dev)
 {
-	struct rte_cryptodev *cryptodev;
 	struct virtio_crypto_hw *hw;
 
-	PMD_INIT_FUNC_TRACE();
-
-	cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
-					init_params);
-	if (cryptodev == NULL)
-		return -ENODEV;
-
 	cryptodev->driver_id = cryptodev_virtio_driver_id;
 	cryptodev->dev_ops = &virtio_crypto_dev_ops;
 
@@ -578,16 +566,41 @@ crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
 	hw->dev_id = cryptodev->data->dev_id;
 	hw->virtio_dev_capabilities = virtio_capabilities;
 
-	VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
-		cryptodev->data->dev_id, pci_dev->id.vendor_id,
-		pci_dev->id.device_id);
+	if (pci_dev) {
+		/* pci device init */
+		VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
+			cryptodev->data->dev_id, pci_dev->id.vendor_id,
+			pci_dev->id.device_id);
 
-	/* pci device init */
-	if (vtpci_cryptodev_init(pci_dev, hw))
+		if (vtpci_cryptodev_init(pci_dev, hw))
+			return -1;
+	}
+
+	if (virtio_crypto_init_device(cryptodev, features) < 0)
 		return -1;
 
-	if (virtio_crypto_init_device(cryptodev,
-			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+	return 0;
+}
+
+/*
+ * This function is based on probe() function
+ * It returns 0 on success.
+ */
+static int
+crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
+		struct rte_cryptodev_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *cryptodev;
+
+	PMD_INIT_FUNC_TRACE();
+
+	cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
+					init_params);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	if (crypto_virtio_dev_init(cryptodev, VIRTIO_CRYPTO_PMD_GUEST_FEATURES,
+			pci_dev) < 0)
 		return -1;
 
 	rte_cryptodev_pmd_probing_finish(cryptodev);
diff --git a/drivers/crypto/virtio/virtio_cryptodev.h b/drivers/crypto/virtio/virtio_cryptodev.h
index b4bdd9800b..95a1e09dca 100644
--- a/drivers/crypto/virtio/virtio_cryptodev.h
+++ b/drivers/crypto/virtio/virtio_cryptodev.h
@@ -74,4 +74,7 @@ uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,
 		struct rte_crypto_op **tx_pkts,
 		uint16_t nb_pkts);
 
+int crypto_virtio_dev_init(struct rte_cryptodev *cryptodev, uint64_t features,
+		struct rte_pci_device *pci_dev);
+
 #endif /* _VIRTIO_CRYPTODEV_H_ */
diff --git a/drivers/crypto/virtio/virtio_pci.h b/drivers/crypto/virtio/virtio_pci.h
index 79945cb88e..c75777e005 100644
--- a/drivers/crypto/virtio/virtio_pci.h
+++ b/drivers/crypto/virtio/virtio_pci.h
@@ -20,6 +20,9 @@ struct virtqueue;
 #define VIRTIO_CRYPTO_PCI_VENDORID 0x1AF4
 #define VIRTIO_CRYPTO_PCI_DEVICEID 0x1054
 
+/* VirtIO device IDs. */
+#define VIRTIO_ID_CRYPTO  20
+
 /* VirtIO ABI version, this must match exactly. */
 #define VIRTIO_PCI_ABI_VERSION 0
 
@@ -56,8 +59,12 @@ struct virtqueue;
 #define VIRTIO_CONFIG_STATUS_DRIVER    0x02
 #define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
 #define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08
+#define VIRTIO_CONFIG_STATUS_DEV_NEED_RESET	0x40
 #define VIRTIO_CONFIG_STATUS_FAILED    0x80
 
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_VRING_ALIGN 4096
+
 /*
  * Each virtqueue indirect descriptor list must be physically contiguous.
  * To allow us to malloc(9) each list individually, limit the number
diff --git a/drivers/crypto/virtio/virtio_ring.h b/drivers/crypto/virtio/virtio_ring.h
index c74d1172b7..4b418f6e60 100644
--- a/drivers/crypto/virtio/virtio_ring.h
+++ b/drivers/crypto/virtio/virtio_ring.h
@@ -181,12 +181,6 @@ vring_init_packed(struct vring_packed *vr, uint8_t *p, rte_iova_t iova,
 				sizeof(struct vring_packed_desc_event)), align);
 }
 
-static inline void
-vring_init(struct vring *vr, unsigned int num, uint8_t *p, unsigned long align)
-{
-	vring_init_split(vr, p, 0, align, num);
-}
-
 /*
  * The following is used with VIRTIO_RING_F_EVENT_IDX.
  * Assuming a given event_idx value from the other size, if we have
diff --git a/drivers/crypto/virtio/virtio_user/vhost_vdpa.c b/drivers/crypto/virtio/virtio_user/vhost_vdpa.c
new file mode 100644
index 0000000000..3fedade775
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_user/vhost_vdpa.c
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Marvell
+ */
+
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <rte_memory.h>
+
+#include "virtio_user/vhost.h"
+
+#include "virtio_user_dev.h"
+#include "../virtio_pci.h"
+
+struct vhost_vdpa_data {
+	int vhostfd;
+	uint64_t protocol_features;
+};
+
+#define VHOST_VDPA_SUPPORTED_BACKEND_FEATURES		\
+	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2	|	\
+	1ULL << VHOST_BACKEND_F_IOTLB_BATCH)
+
+/* vhost kernel & vdpa ioctls */
+#define VHOST_VIRTIO 0xAF
+#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
+#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
+#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
+#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
+#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
+#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
+#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
+#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
+#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
+#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
+#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
+#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
+#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
+#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
+#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
+#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
+#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
+#define VHOST_VDPA_GET_CONFIG _IOR(VHOST_VIRTIO, 0x73, struct vhost_vdpa_config)
+#define VHOST_VDPA_SET_CONFIG _IOW(VHOST_VIRTIO, 0x74, struct vhost_vdpa_config)
+#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, struct vhost_vring_state)
+#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
+
+/* no alignment requirement */
+struct vhost_iotlb_msg {
+	uint64_t iova;
+	uint64_t size;
+	uint64_t uaddr;
+#define VHOST_ACCESS_RO      0x1
+#define VHOST_ACCESS_WO      0x2
+#define VHOST_ACCESS_RW      0x3
+	uint8_t perm;
+#define VHOST_IOTLB_MISS           1
+#define VHOST_IOTLB_UPDATE         2
+#define VHOST_IOTLB_INVALIDATE     3
+#define VHOST_IOTLB_ACCESS_FAIL    4
+#define VHOST_IOTLB_BATCH_BEGIN    5
+#define VHOST_IOTLB_BATCH_END      6
+	uint8_t type;
+};
+
+#define VHOST_IOTLB_MSG_V2 0x2
+
+struct vhost_vdpa_config {
+	uint32_t off;
+	uint32_t len;
+	uint8_t buf[];
+};
+
+struct vhost_msg {
+	uint32_t type;
+	uint32_t reserved;
+	union {
+		struct vhost_iotlb_msg iotlb;
+		uint8_t padding[64];
+	};
+};
+
+static int
+vhost_vdpa_ioctl(int fd, uint64_t request, void *arg)
+{
+	int ret;
+
+	ret = ioctl(fd, request, arg);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Vhost-vDPA ioctl %"PRIu64" failed (%s)",
+				request, strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+vhost_vdpa_get_protocol_features(struct virtio_user_dev *dev, uint64_t *features)
+{
+	struct vhost_vdpa_data *data = dev->backend_data;
+
+	return vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_BACKEND_FEATURES, features);
+}
+
+static int
+vhost_vdpa_set_protocol_features(struct virtio_user_dev *dev, uint64_t features)
+{
+	struct vhost_vdpa_data *data = dev->backend_data;
+
+	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_BACKEND_FEATURES, &features);
+}
+
+static int
+vhost_vdpa_get_features(struct virtio_user_dev *dev, uint64_t *features)
+{
+	struct vhost_vdpa_data *data = dev->backend_data;
+	int ret;
+
+	ret = vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_FEATURES, features);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get features");
+		return -1;
+	}
+
+	/* Negotiated vDPA backend features */
+	ret = vhost_vdpa_get_protocol_features(dev, &data->protocol_features);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to get backend features");
+		return -1;
+	}
+
+	data->protocol_features &= VHOST_VDPA_SUPPORTED_BACKEND_FEATURES;
+
+	ret = vhost_vdpa_set_protocol_features(dev, data->protocol_features);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to set backend features");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+vhost_vdpa_set_vring_enable(struct virtio_user_dev *dev, struct vhost_vring_state *state)
+{
+	struct vhost_vdpa_data *data = dev->backend_data;
+
+	return vhost_vdpa_ioctl(data->vhostfd, VHOST_VDPA_SET_VRING_ENABLE, state);
+}
+
+/**
+ * Set up environment to talk with a vhost vdpa backend.
+ *
+ * @return
+ *   - (-1) if fail to set up;
+ *   - (>=0) if successful.
+ */
+static int
+vhost_vdpa_setup(struct virtio_user_dev *dev)
+{
+	struct vhost_vdpa_data *data;
+	uint32_t did = (uint32_t)-1;
+
+	data = malloc(sizeof(*data));
+	if (!data) {
+		PMD_DRV_LOG(ERR, "(%s) Faidle to allocate backend data", dev->path);
+		return -1;
+	}
+
+	data->vhostfd = open(dev->path, O_RDWR);
+	if (data->vhostfd < 0) {
+		PMD_DRV_LOG(ERR, "Failed to open %s: %s",
+				dev->path, strerror(errno));
+		free(data);
+		return -1;
+	}
+
+	if (ioctl(data->vhostfd, VHOST_VDPA_GET_DEVICE_ID, &did) < 0 ||
+			did != VIRTIO_ID_CRYPTO) {
+		PMD_DRV_LOG(ERR, "Invalid vdpa device ID: %u", did);
+		close(data->vhostfd);
+		free(data);
+		return -1;
+	}
+
+	dev->backend_data = data;
+
+	return 0;
+}
+
+static int
+vhost_vdpa_cvq_enable(struct virtio_user_dev *dev, int enable)
+{
+	struct vhost_vring_state state = {
+		.index = dev->max_queue_pairs,
+		.num   = enable,
+	};
+
+	return vhost_vdpa_set_vring_enable(dev, &state);
+}
+
+static int
+vhost_vdpa_enable_queue_pair(struct virtio_user_dev *dev,
+				uint16_t pair_idx,
+				int enable)
+{
+	struct vhost_vring_state state = {
+		.index = pair_idx,
+		.num   = enable,
+	};
+
+	if (dev->qp_enabled[pair_idx] == enable)
+		return 0;
+
+	if (vhost_vdpa_set_vring_enable(dev, &state))
+		return -1;
+
+	dev->qp_enabled[pair_idx] = enable;
+	return 0;
+}
+
+static int
+vhost_vdpa_update_link_state(struct virtio_user_dev *dev)
+{
+	dev->crypto_status = VIRTIO_CRYPTO_S_HW_READY;
+	return 0;
+}
+
+static int
+vhost_vdpa_get_nr_vrings(struct virtio_user_dev *dev)
+{
+	int nr_vrings = dev->max_queue_pairs;
+
+	return nr_vrings;
+}
+
+static int
+vhost_vdpa_unmap_notification_area(struct virtio_user_dev *dev)
+{
+	int i, nr_vrings;
+
+	nr_vrings = vhost_vdpa_get_nr_vrings(dev);
+
+	for (i = 0; i < nr_vrings; i++) {
+		if (dev->notify_area[i])
+			munmap(dev->notify_area[i], getpagesize());
+	}
+	free(dev->notify_area);
+	dev->notify_area = NULL;
+
+	return 0;
+}
+
+static int
+vhost_vdpa_map_notification_area(struct virtio_user_dev *dev)
+{
+	struct vhost_vdpa_data *data = dev->backend_data;
+	int nr_vrings, i, page_size = getpagesize();
+	uint16_t **notify_area;
+
+	nr_vrings = vhost_vdpa_get_nr_vrings(dev);
+
+	/* CQ is another vring */
+	nr_vrings++;
+
+	notify_area = malloc(nr_vrings * sizeof(*notify_area));
+	if (!notify_area) {
+		PMD_DRV_LOG(ERR, "(%s) Failed to allocate notify area array", dev->path);
+		return -1;
+	}
+
+	for (i = 0; i < nr_vrings; i++) {
+		notify_area[i] = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED | MAP_FILE,
+					data->vhostfd, i * page_size);
+		if (notify_area[i] == MAP_FAILED) {
+			PMD_DRV_LOG(ERR, "(%s) Map failed for notify address of queue %d",
+					dev->path, i);
+			i--;
+			goto map_err;
+		}
+	}
+	dev->notify_area = notify_area;
+
+	return 0;
+
+map_err:
+	for (; i >= 0; i--)
+		munmap(notify_area[i], page_size);
+	free(notify_area);
+
+	return -1;
+}
+
+struct virtio_user_backend_ops virtio_crypto_ops_vdpa = {
+	.setup = vhost_vdpa_setup,
+	.get_features = vhost_vdpa_get_features,
+	.cvq_enable = vhost_vdpa_cvq_enable,
+	.enable_qp = vhost_vdpa_enable_queue_pair,
+	.update_link_state = vhost_vdpa_update_link_state,
+	.map_notification_area = vhost_vdpa_map_notification_area,
+	.unmap_notification_area = vhost_vdpa_unmap_notification_area,
+};
diff --git a/drivers/crypto/virtio/virtio_user/virtio_user_dev.c b/drivers/crypto/virtio/virtio_user/virtio_user_dev.c
new file mode 100644
index 0000000000..fed740073d
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_user/virtio_user_dev.c
@@ -0,0 +1,774 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Marvell.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <sys/eventfd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <pthread.h>
+
+#include <rte_alarm.h>
+#include <rte_string_fns.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_io.h>
+
+#include "virtio_user/vhost.h"
+#include "virtio_logs.h"
+
+#include "cryptodev_pmd.h"
+#include "virtio_crypto.h"
+#include "virtio_cvq.h"
+#include "virtio_user_dev.h"
+#include "virtqueue.h"
+
+#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"
+
+const char * const crypto_virtio_user_backend_strings[] = {
+	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
+	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
+};
+
+static int
+virtio_user_uninit_notify_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+	if (dev->kickfds[queue_sel] >= 0) {
+		close(dev->kickfds[queue_sel]);
+		dev->kickfds[queue_sel] = -1;
+	}
+
+	if (dev->callfds[queue_sel] >= 0) {
+		close(dev->callfds[queue_sel]);
+		dev->callfds[queue_sel] = -1;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_init_notify_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+	/* May use an invalid flag, but some backends use kickfd and
+	 * callfd as criteria to judge if the device is alive, so we
+	 * use real eventfds in the end.
+	 */
+	dev->callfds[queue_sel] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+	if (dev->callfds[queue_sel] < 0) {
+		PMD_DRV_LOG(ERR, "(%s) Failed to setup callfd for queue %u: %s",
+				dev->path, queue_sel, strerror(errno));
+		return -1;
+	}
+	dev->kickfds[queue_sel] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+	if (dev->kickfds[queue_sel] < 0) {
+		PMD_DRV_LOG(ERR, "(%s) Failed to setup kickfd for queue %u: %s",
+				dev->path, queue_sel, strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_destroy_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+	struct vhost_vring_state state;
+	int ret;
+
+	state.index = queue_sel;
+	ret = dev->ops->get_vring_base(dev, &state);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "(%s) Failed to destroy queue %u", dev->path, queue_sel);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+	/* Of all per-virtqueue messages, make sure VHOST_SET_VRING_CALL comes
+	 * first, because vhost depends on this message to allocate the
+	 * virtqueue pair.
+	 */
+	struct vhost_vring_file file;
+	int ret;
+
+	file.index = queue_sel;
+	file.fd = dev->callfds[queue_sel];
+	ret = dev->ops->set_vring_call(dev, &file);
+	if (ret < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to create queue %u", dev->path, queue_sel);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+	int ret;
+	struct vhost_vring_file file;
+	struct vhost_vring_state state;
+	struct vring *vring = &dev->vrings.split[queue_sel];
+	struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
+	uint64_t desc_addr, avail_addr, used_addr;
+	struct vhost_vring_addr addr = {
+		.index = queue_sel,
+		.log_guest_addr = 0,
+		.flags = 0, /* disable log */
+	};
+
+	if (queue_sel == dev->max_queue_pairs) {
+		if (!dev->scvq) {
+			PMD_INIT_LOG(ERR, "(%s) Shadow control queue expected but missing",
+					dev->path);
+			goto err;
+		}
+
+		/* Use shadow control queue information */
+		vring = &dev->scvq->vq_split.ring;
+		pq_vring = &dev->scvq->vq_packed.ring;
+	}
+
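+	/* Compute ring addresses: packed rings place the driver/device event
+	 * suppression structures after the descriptors, while split rings
+	 * place the avail and used rings after the descriptor table.
+	 */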
+	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
+		desc_addr = pq_vring->desc_iova;
+		avail_addr = desc_addr + pq_vring->num * sizeof(struct vring_packed_desc);
+		used_addr =  RTE_ALIGN_CEIL(avail_addr + sizeof(struct vring_packed_desc_event),
+						VIRTIO_VRING_ALIGN);
+
+		addr.desc_user_addr = desc_addr;
+		addr.avail_user_addr = avail_addr;
+		addr.used_user_addr = used_addr;
+	} else {
+		desc_addr = vring->desc_iova;
+		avail_addr = desc_addr + vring->num * sizeof(struct vring_desc);
+		used_addr = RTE_ALIGN_CEIL((uintptr_t)(&vring->avail->ring[vring->num]),
+					VIRTIO_VRING_ALIGN);
+
+		addr.desc_user_addr = desc_addr;
+		addr.avail_user_addr = avail_addr;
+		addr.used_user_addr = used_addr;
+	}
+
+	state.index = queue_sel;
+	state.num = vring->num;
+	ret = dev->ops->set_vring_num(dev, &state);
+	if (ret < 0)
+		goto err;
+
+	state.index = queue_sel;
+	state.num = 0; /* no reservation */
+	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
+		state.num |= (1 << 15);
+	ret = dev->ops->set_vring_base(dev, &state);
+	if (ret < 0)
+		goto err;
+
+	ret = dev->ops->set_vring_addr(dev, &addr);
+	if (ret < 0)
+		goto err;
+
+	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
+	 * comes last, because vhost depends on this message to judge if
+	 * virtio is ready.
+	 */
+	file.index = queue_sel;
+	file.fd = dev->kickfds[queue_sel];
+	ret = dev->ops->set_vring_kick(dev, &file);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+err:
+	PMD_INIT_LOG(ERR, "(%s) Failed to kick queue %u", dev->path, queue_sel);
+
+	return -1;
+}
+
+static int
+virtio_user_foreach_queue(struct virtio_user_dev *dev,
+			int (*fn)(struct virtio_user_dev *, uint32_t))
+{
+	uint32_t i, nr_vq;
+
+	nr_vq = dev->max_queue_pairs;
+
+	for (i = 0; i < nr_vq; i++)
+		if (fn(dev, i) < 0)
+			return -1;
+
+	return 0;
+}
+
+int
+crypto_virtio_user_dev_set_features(struct virtio_user_dev *dev)
+{
+	uint64_t features;
+	int ret = -1;
+
+	pthread_mutex_lock(&dev->mutex);
+
+	/* Step 0: tell vhost to create queues */
+	if (virtio_user_foreach_queue(dev, virtio_user_create_queue) < 0)
+		goto error;
+
+	features = dev->features;
+
+	ret = dev->ops->set_features(dev, features);
+	if (ret < 0)
+		goto error;
+	PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
+error:
+	pthread_mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+int
+crypto_virtio_user_start_device(struct virtio_user_dev *dev)
+{
+	int ret;
+
+	/*
+	 * XXX workaround!
+	 *
+	 * We need to make sure that the locks will be
+	 * taken in the correct order to avoid deadlocks.
+	 *
+	 * Before releasing this lock, this thread should
+	 * not trigger any memory hotplug events.
+	 *
+	 * This is a temporary workaround, and should be
+	 * replaced when we get proper support from the
+	 * memory subsystem in the future.
+	 */
+	rte_mcfg_mem_read_lock();
+	pthread_mutex_lock(&dev->mutex);
+
+	/* Step 2: share memory regions */
+	ret = dev->ops->set_memory_table(dev);
+	if (ret < 0)
+		goto error;
+
+	/* Step 3: kick queues */
+	ret = virtio_user_foreach_queue(dev, virtio_user_kick_queue);
+	if (ret < 0)
+		goto error;
+
+	ret = virtio_user_kick_queue(dev, dev->max_queue_pairs);
+	if (ret < 0)
+		goto error;
+
+	/* Step 4: enable queues */
+	for (int i = 0; i < dev->max_queue_pairs; i++) {
+		ret = dev->ops->enable_qp(dev, i, 1);
+		if (ret < 0)
+			goto error;
+	}
+
+	dev->started = true;
+
+	pthread_mutex_unlock(&dev->mutex);
+	rte_mcfg_mem_read_unlock();
+
+	return 0;
+error:
+	pthread_mutex_unlock(&dev->mutex);
+	rte_mcfg_mem_read_unlock();
+
+	PMD_INIT_LOG(ERR, "(%s) Failed to start device", dev->path);
+
+	return -1;
+}
+
+int crypto_virtio_user_stop_device(struct virtio_user_dev *dev)
+{
+	uint32_t i;
+	int ret;
+
+	pthread_mutex_lock(&dev->mutex);
+	if (!dev->started)
+		goto out;
+
+	for (i = 0; i < dev->max_queue_pairs; ++i) {
+		ret = dev->ops->enable_qp(dev, i, 0);
+		if (ret < 0)
+			goto err;
+	}
+
+	if (dev->scvq) {
+		ret = dev->ops->cvq_enable(dev, 0);
+		if (ret < 0)
+			goto err;
+	}
+
+	/* Stop the backend. */
+	if (virtio_user_foreach_queue(dev, virtio_user_destroy_queue) < 0)
+		goto err;
+
+	dev->started = false;
+
+out:
+	pthread_mutex_unlock(&dev->mutex);
+
+	return 0;
+err:
+	pthread_mutex_unlock(&dev->mutex);
+
+	PMD_INIT_LOG(ERR, "(%s) Failed to stop device", dev->path);
+
+	return -1;
+}
+
+static int
+virtio_user_dev_init_max_queue_pairs(struct virtio_user_dev *dev, uint32_t user_max_qp)
+{
+	int ret;
+
+	if (!dev->ops->get_config) {
+		dev->max_queue_pairs = user_max_qp;
+		return 0;
+	}
+
+	ret = dev->ops->get_config(dev, (uint8_t *)&dev->max_queue_pairs,
+			offsetof(struct virtio_crypto_config, max_dataqueues),
+			sizeof(uint16_t));
+	if (ret) {
+		/*
+		 * We need to know the max queue pair from the device so that
+		 * the control queue gets the right index.
+		 */
+		dev->max_queue_pairs = 1;
+		PMD_DRV_LOG(ERR, "(%s) Failed to get max queue pairs from device", dev->path);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_dev_init_cipher_services(struct virtio_user_dev *dev)
+{
+	struct virtio_crypto_config config;
+	int ret;
+
+	dev->crypto_services = RTE_BIT32(VIRTIO_CRYPTO_SERVICE_CIPHER);
+	dev->cipher_algo = 0;
+	dev->auth_algo = 0;
+	dev->akcipher_algo = 0;
+
+	if (!dev->ops->get_config)
+		return 0;
+
+	ret = dev->ops->get_config(dev, (uint8_t *)&config,	0, sizeof(config));
+	if (ret) {
+		PMD_DRV_LOG(ERR, "(%s) Failed to get crypto config from device", dev->path);
+		return ret;
+	}
+
+	dev->crypto_services = config.crypto_services;
+	dev->cipher_algo = ((uint64_t)config.cipher_algo_h << 32) |
+						config.cipher_algo_l;
+	dev->hash_algo = config.hash_algo;
+	dev->auth_algo = ((uint64_t)config.mac_algo_h << 32) |
+						config.mac_algo_l;
+	dev->aead_algo = config.aead_algo;
+	dev->akcipher_algo = config.akcipher_algo;
+	return 0;
+}
+
+static int
+virtio_user_dev_init_notify(struct virtio_user_dev *dev)
+{
+	if (virtio_user_foreach_queue(dev, virtio_user_init_notify_queue) < 0)
+		goto err;
+
+	if (dev->device_features & (1ULL << VIRTIO_F_NOTIFICATION_DATA))
+		if (dev->ops->map_notification_area &&
+				dev->ops->map_notification_area(dev))
+			goto err;
+
+	return 0;
+err:
+	virtio_user_foreach_queue(dev, virtio_user_uninit_notify_queue);
+
+	return -1;
+}
+
+static void
+virtio_user_dev_uninit_notify(struct virtio_user_dev *dev)
+{
+	virtio_user_foreach_queue(dev, virtio_user_uninit_notify_queue);
+
+	if (dev->ops->unmap_notification_area && dev->notify_area)
+		dev->ops->unmap_notification_area(dev);
+}
+
+static void
+virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
+			const void *addr,
+			size_t len __rte_unused,
+			void *arg)
+{
+	struct virtio_user_dev *dev = arg;
+	struct rte_memseg_list *msl;
+	uint16_t i;
+	int ret = 0;
+
+	/* ignore externally allocated memory */
+	msl = rte_mem_virt2memseg_list(addr);
+	if (msl->external)
+		return;
+
+	pthread_mutex_lock(&dev->mutex);
+
+	if (dev->started == false)
+		goto exit;
+
+	/* Step 1: pause the active queues */
+	for (i = 0; i < dev->queue_pairs; i++) {
+		ret = dev->ops->enable_qp(dev, i, 0);
+		if (ret < 0)
+			goto exit;
+	}
+
+	/* Step 2: update memory regions */
+	ret = dev->ops->set_memory_table(dev);
+	if (ret < 0)
+		goto exit;
+
+	/* Step 3: resume the active queues */
+	for (i = 0; i < dev->queue_pairs; i++) {
+		ret = dev->ops->enable_qp(dev, i, 1);
+		if (ret < 0)
+			goto exit;
+	}
+
+exit:
+	pthread_mutex_unlock(&dev->mutex);
+
+	if (ret < 0)
+		PMD_DRV_LOG(ERR, "(%s) Failed to update memory table", dev->path);
+}
+
+static int
+virtio_user_dev_setup(struct virtio_user_dev *dev)
+{
+	if (dev->is_server) {
+		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
+			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
+			return -1;
+		}
+	}
+
+	switch (dev->backend_type) {
+	case VIRTIO_USER_BACKEND_VHOST_VDPA:
+		dev->ops = &virtio_ops_vdpa;
+		dev->ops->setup = virtio_crypto_ops_vdpa.setup;
+		dev->ops->get_features = virtio_crypto_ops_vdpa.get_features;
+		dev->ops->cvq_enable = virtio_crypto_ops_vdpa.cvq_enable;
+		dev->ops->enable_qp = virtio_crypto_ops_vdpa.enable_qp;
+		dev->ops->update_link_state = virtio_crypto_ops_vdpa.update_link_state;
+		dev->ops->map_notification_area = virtio_crypto_ops_vdpa.map_notification_area;
+		dev->ops->unmap_notification_area = virtio_crypto_ops_vdpa.unmap_notification_area;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path);
+		return -1;
+	}
+
+	if (dev->ops->setup(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to setup backend", dev->path);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_alloc_vrings(struct virtio_user_dev *dev)
+{
+	int i, size, nr_vrings;
+	bool packed_ring = !!(dev->device_features & (1ull << VIRTIO_F_RING_PACKED));
+
+	nr_vrings = dev->max_queue_pairs + 1;
+
+	dev->callfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->callfds), 0);
+	if (!dev->callfds) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc callfds", dev->path);
+		return -1;
+	}
+
+	dev->kickfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->kickfds), 0);
+	if (!dev->kickfds) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc kickfds", dev->path);
+		goto free_callfds;
+	}
+
+	for (i = 0; i < nr_vrings; i++) {
+		dev->callfds[i] = -1;
+		dev->kickfds[i] = -1;
+	}
+
+	if (packed_ring)
+		size = sizeof(*dev->vrings.packed);
+	else
+		size = sizeof(*dev->vrings.split);
+	dev->vrings.ptr = rte_zmalloc("virtio_user_dev", nr_vrings * size, 0);
+	if (!dev->vrings.ptr) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc vrings metadata", dev->path);
+		goto free_kickfds;
+	}
+
+	if (packed_ring) {
+		dev->packed_queues = rte_zmalloc("virtio_user_dev",
+				nr_vrings * sizeof(*dev->packed_queues), 0);
+		if (!dev->packed_queues) {
+			PMD_INIT_LOG(ERR, "(%s) Failed to alloc packed queues metadata",
+					dev->path);
+			goto free_vrings;
+		}
+	}
+
+	dev->qp_enabled = rte_zmalloc("virtio_user_dev",
+			nr_vrings * sizeof(*dev->qp_enabled), 0);
+	if (!dev->qp_enabled) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc QP enable states", dev->path);
+		goto free_packed_queues;
+	}
+
+	return 0;
+
+free_packed_queues:
+	rte_free(dev->packed_queues);
+	dev->packed_queues = NULL;
+free_vrings:
+	rte_free(dev->vrings.ptr);
+	dev->vrings.ptr = NULL;
+free_kickfds:
+	rte_free(dev->kickfds);
+	dev->kickfds = NULL;
+free_callfds:
+	rte_free(dev->callfds);
+	dev->callfds = NULL;
+
+	return -1;
+}
+
+static void
+virtio_user_free_vrings(struct virtio_user_dev *dev)
+{
+	rte_free(dev->qp_enabled);
+	dev->qp_enabled = NULL;
+	rte_free(dev->packed_queues);
+	dev->packed_queues = NULL;
+	rte_free(dev->vrings.ptr);
+	dev->vrings.ptr = NULL;
+	rte_free(dev->kickfds);
+	dev->kickfds = NULL;
+	rte_free(dev->callfds);
+	dev->callfds = NULL;
+}
+
+#define VIRTIO_USER_SUPPORTED_FEATURES   \
+	(1ULL << VIRTIO_CRYPTO_SERVICE_CIPHER     | \
+	 1ULL << VIRTIO_CRYPTO_SERVICE_HASH       | \
+	 1ULL << VIRTIO_CRYPTO_SERVICE_AKCIPHER   | \
+	 1ULL << VIRTIO_F_VERSION_1               | \
+	 1ULL << VIRTIO_F_IN_ORDER                | \
+	 1ULL << VIRTIO_F_RING_PACKED             | \
+	 1ULL << VIRTIO_F_NOTIFICATION_DATA       | \
+	 1ULL << VIRTIO_F_ORDER_PLATFORM)
+
+int
+crypto_virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
+			int queue_size, int server)
+{
+	uint64_t backend_features;
+
+	pthread_mutex_init(&dev->mutex, NULL);
+	strlcpy(dev->path, path, PATH_MAX);
+
+	dev->started = 0;
+	dev->queue_pairs = 1; /* mq disabled by default */
+	dev->max_queue_pairs = queues; /* initialize to user requested value for kernel backend */
+	dev->queue_size = queue_size;
+	dev->is_server = server;
+	dev->frontend_features = 0;
+	dev->unsupported_features = 0;
+	dev->backend_type = VIRTIO_USER_BACKEND_VHOST_VDPA;
+	dev->hw.modern = 1;
+
+	if (virtio_user_dev_setup(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) backend set up fails", dev->path);
+		return -1;
+	}
+
+	if (dev->ops->set_owner(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path);
+		goto destroy;
+	}
+
+	if (dev->ops->get_backend_features(&backend_features) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path);
+		goto destroy;
+	}
+
+	dev->unsupported_features = ~(VIRTIO_USER_SUPPORTED_FEATURES | backend_features);
+
+	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to get device features", dev->path);
+		goto destroy;
+	}
+
+	if (virtio_user_dev_init_max_queue_pairs(dev, queues)) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to get max queue pairs", dev->path);
+		goto destroy;
+	}
+
+	if (virtio_user_dev_init_cipher_services(dev)) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to get cipher services", dev->path);
+		goto destroy;
+	}
+
+	dev->frontend_features &= ~dev->unsupported_features;
+	dev->device_features &= ~dev->unsupported_features;
+
+	if (virtio_user_alloc_vrings(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to allocate vring metadata", dev->path);
+		goto destroy;
+	}
+
+	if (virtio_user_dev_init_notify(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path);
+		goto free_vrings;
+	}
+
+	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
+				virtio_user_mem_event_cb, dev)) {
+		if (rte_errno != ENOTSUP) {
+			PMD_INIT_LOG(ERR, "(%s) Failed to register mem event callback",
+					dev->path);
+			goto notify_uninit;
+		}
+	}
+
+	return 0;
+
+notify_uninit:
+	virtio_user_dev_uninit_notify(dev);
+free_vrings:
+	virtio_user_free_vrings(dev);
+destroy:
+	dev->ops->destroy(dev);
+
+	return -1;
+}
+
+void
+crypto_virtio_user_dev_uninit(struct virtio_user_dev *dev)
+{
+	crypto_virtio_user_stop_device(dev);
+
+	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);
+
+	virtio_user_dev_uninit_notify(dev);
+
+	virtio_user_free_vrings(dev);
+
+	if (dev->is_server)
+		unlink(dev->path);
+
+	dev->ops->destroy(dev);
+}
+
+#define CVQ_MAX_DATA_DESCS 32
+
+static inline void *
+virtio_user_iova2virt(struct virtio_user_dev *dev __rte_unused, rte_iova_t iova)
+{
+	if (rte_eal_iova_mode() == RTE_IOVA_VA)
+		return (void *)(uintptr_t)iova;
+	else
+		return rte_mem_iova2virt(iova);
+}
+
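+/* A packed descriptor is available to consume when its AVAIL flag matches
+ * the expected wrap counter and its USED flag does not.
+ */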
+static inline int
+desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
+{
+	uint16_t flags = rte_atomic_load_explicit(&desc->flags, rte_memory_order_acquire);
+
+	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
+		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
+}
+
+int
+crypto_virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
+{
+	int ret;
+
+	pthread_mutex_lock(&dev->mutex);
+	dev->status = status;
+	ret = dev->ops->set_status(dev, status);
+	if (ret && ret != -ENOTSUP)
+		PMD_INIT_LOG(ERR, "(%s) Failed to set backend status", dev->path);
+
+	pthread_mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+int
+crypto_virtio_user_dev_update_status(struct virtio_user_dev *dev)
+{
+	int ret;
+	uint8_t status;
+
+	pthread_mutex_lock(&dev->mutex);
+
+	ret = dev->ops->get_status(dev, &status);
+	if (!ret) {
+		dev->status = status;
+		PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):"
+			"\t-RESET: %u "
+			"\t-ACKNOWLEDGE: %u "
+			"\t-DRIVER: %u "
+			"\t-DRIVER_OK: %u "
+			"\t-FEATURES_OK: %u "
+			"\t-DEVICE_NEED_RESET: %u "
+			"\t-FAILED: %u",
+			dev->status,
+			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
+	} else if (ret != -ENOTSUP) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to get backend status", dev->path);
+	}
+
+	pthread_mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+int
+crypto_virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
+{
+	if (dev->ops->update_link_state)
+		return dev->ops->update_link_state(dev);
+
+	return 0;
+}
diff --git a/drivers/crypto/virtio/virtio_user/virtio_user_dev.h b/drivers/crypto/virtio/virtio_user/virtio_user_dev.h
new file mode 100644
index 0000000000..2a0052b3ca
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_user/virtio_user_dev.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Marvell.
+ */
+
+#ifndef _VIRTIO_USER_DEV_H
+#define _VIRTIO_USER_DEV_H
+
+#include <limits.h>
+#include <stdbool.h>
+
+#include "../virtio_pci.h"
+#include "../virtio_ring.h"
+
+extern struct virtio_user_backend_ops virtio_crypto_ops_vdpa;
+
+enum virtio_user_backend_type {
+	VIRTIO_USER_BACKEND_UNKNOWN,
+	VIRTIO_USER_BACKEND_VHOST_USER,
+	VIRTIO_USER_BACKEND_VHOST_VDPA,
+};
+
+struct virtio_user_queue {
+	uint16_t used_idx;
+	bool avail_wrap_counter;
+	bool used_wrap_counter;
+};
+
+struct virtio_user_dev {
+	union {
+		struct virtio_crypto_hw hw;
+		uint8_t dummy[256];
+	};
+
+	void		*backend_data;
+	uint16_t	**notify_area;
+	char		path[PATH_MAX];
+	bool		hw_cvq;
+	uint16_t	max_queue_pairs;
+	uint64_t	device_features; /* supported features by device */
+	bool		*qp_enabled;
+
+	enum virtio_user_backend_type backend_type;
+	bool		is_server;  /* server or client mode */
+
+	int		*callfds;
+	int		*kickfds;
+	uint16_t	queue_pairs;
+	uint32_t	queue_size;
+	uint64_t	features; /* features negotiated with the driver,
+				   * to be synced with the device
+				   */
+	uint64_t	frontend_features; /* enabled frontend features */
+	uint64_t	unsupported_features; /* unsupported features mask */
+	uint8_t		status;
+	uint32_t	crypto_status;
+	uint32_t	crypto_services;
+	uint64_t	cipher_algo;
+	uint32_t	hash_algo;
+	uint64_t	auth_algo;
+	uint32_t	aead_algo;
+	uint32_t	akcipher_algo;
+
+	union {
+		void			*ptr;
+		struct vring		*split;
+		struct vring_packed	*packed;
+	} vrings;
+
+	struct virtio_user_queue *packed_queues;
+
+	struct virtio_user_backend_ops *ops;
+	pthread_mutex_t	mutex;
+	bool		started;
+
+	struct virtqueue	*scvq;
+};
+
+int crypto_virtio_user_dev_set_features(struct virtio_user_dev *dev);
+int crypto_virtio_user_start_device(struct virtio_user_dev *dev);
+int crypto_virtio_user_stop_device(struct virtio_user_dev *dev);
+int crypto_virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
+			int queue_size, int server);
+void crypto_virtio_user_dev_uninit(struct virtio_user_dev *dev);
+int crypto_virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status);
+int crypto_virtio_user_dev_update_status(struct virtio_user_dev *dev);
+int crypto_virtio_user_dev_update_link_state(struct virtio_user_dev *dev);
+extern const char * const crypto_virtio_user_backend_strings[];
+#endif
diff --git a/drivers/crypto/virtio/virtio_user_cryptodev.c b/drivers/crypto/virtio/virtio_user_cryptodev.c
new file mode 100644
index 0000000000..f5725f0a59
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_user_cryptodev.c
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Marvell
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <rte_malloc.h>
+#include <rte_kvargs.h>
+#include <bus_vdev_driver.h>
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include <rte_alarm.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+
+#include "virtio_user/virtio_user_dev.h"
+#include "virtio_user/vhost.h"
+#include "virtio_cryptodev.h"
+#include "virtio_logs.h"
+#include "virtio_pci.h"
+#include "virtqueue.h"
+
+#define virtio_user_get_dev(hwp) container_of(hwp, struct virtio_user_dev, hw)
+
+static void
+virtio_user_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+		     void *dst, int length __rte_unused)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	if (offset == offsetof(struct virtio_crypto_config, status)) {
+		crypto_virtio_user_dev_update_link_state(dev);
+		*(uint32_t *)dst = dev->crypto_status;
+	} else if (offset == offsetof(struct virtio_crypto_config, max_dataqueues))
+		*(uint16_t *)dst = dev->max_queue_pairs;
+	else if (offset == offsetof(struct virtio_crypto_config, crypto_services))
+		*(uint32_t *)dst = dev->crypto_services;
+	else if (offset == offsetof(struct virtio_crypto_config, cipher_algo_l))
+		*(uint32_t *)dst = dev->cipher_algo & 0xFFFF;
+	else if (offset == offsetof(struct virtio_crypto_config, cipher_algo_h))
+		*(uint32_t *)dst = dev->cipher_algo >> 32;
+	else if (offset == offsetof(struct virtio_crypto_config, hash_algo))
+		*(uint32_t *)dst = dev->hash_algo;
+	else if (offset == offsetof(struct virtio_crypto_config, mac_algo_l))
+		*(uint32_t *)dst = dev->auth_algo & 0xFFFFFFFF;
+	else if (offset == offsetof(struct virtio_crypto_config, mac_algo_h))
+		*(uint32_t *)dst = dev->auth_algo >> 32;
+	else if (offset == offsetof(struct virtio_crypto_config, aead_algo))
+		*(uint32_t *)dst = dev->aead_algo;
+	else if (offset == offsetof(struct virtio_crypto_config, akcipher_algo))
+		*(uint32_t *)dst = dev->akcipher_algo;
+}
+
+static void
+virtio_user_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+		      const void *src, int length)
+{
+	RTE_SET_USED(hw);
+	RTE_SET_USED(src);
+
+	PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
+		    offset, length);
+}
+
+static void
+virtio_user_reset(struct virtio_crypto_hw *hw)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
+		crypto_virtio_user_stop_device(dev);
+}
+
+static void
+virtio_user_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	uint8_t old_status = dev->status;
+
+	if (status & VIRTIO_CONFIG_STATUS_FEATURES_OK &&
+			~old_status & VIRTIO_CONFIG_STATUS_FEATURES_OK) {
+		crypto_virtio_user_dev_set_features(dev);
+		/* Feature negotiation should be only done in probe time.
+		 * So we skip any more request here.
+		 */
+		dev->status |= VIRTIO_CONFIG_STATUS_FEATURES_OK;
+	}
+
+	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) {
+		if (crypto_virtio_user_start_device(dev)) {
+			crypto_virtio_user_dev_update_status(dev);
+			return;
+		}
+	} else if (status == VIRTIO_CONFIG_STATUS_RESET) {
+		virtio_user_reset(hw);
+	}
+
+	crypto_virtio_user_dev_set_status(dev, status);
+	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK && dev->scvq) {
+		if (dev->ops->cvq_enable(dev, 1) < 0) {
+			PMD_INIT_LOG(ERR, "(%s) Failed to start ctrlq", dev->path);
+			crypto_virtio_user_dev_update_status(dev);
+			return;
+		}
+	}
+}
+
+static uint8_t
+virtio_user_get_status(struct virtio_crypto_hw *hw)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	crypto_virtio_user_dev_update_status(dev);
+
+	return dev->status;
+}
+
+#define VIRTIO_USER_CRYPTO_PMD_GUEST_FEATURES   \
+	(1ULL << VIRTIO_CRYPTO_SERVICE_CIPHER     | \
+	 1ULL << VIRTIO_CRYPTO_SERVICE_AKCIPHER   | \
+	 1ULL << VIRTIO_F_VERSION_1               | \
+	 1ULL << VIRTIO_F_IN_ORDER                | \
+	 1ULL << VIRTIO_F_RING_PACKED             | \
+	 1ULL << VIRTIO_F_NOTIFICATION_DATA       | \
+	 1ULL << VIRTIO_RING_F_INDIRECT_DESC      | \
+	 1ULL << VIRTIO_F_ORDER_PLATFORM)
+
+static uint64_t
+virtio_user_get_features(struct virtio_crypto_hw *hw)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	/* unmask feature bits defined in vhost user protocol */
+	return (dev->device_features | dev->frontend_features) &
+		VIRTIO_USER_CRYPTO_PMD_GUEST_FEATURES;
+}
+
+static void
+virtio_user_set_features(struct virtio_crypto_hw *hw, uint64_t features)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	dev->features = features & (dev->device_features | dev->frontend_features);
+}
+
+static uint8_t
+virtio_user_get_isr(struct virtio_crypto_hw *hw __rte_unused)
+{
+	/* Queue interrupts and the config interrupt are separated in
+	 * virtio-user; here we only report config change.
+	 */
+	return VIRTIO_PCI_CAP_ISR_CFG;
+}
+
+static uint16_t
+virtio_user_set_config_irq(struct virtio_crypto_hw *hw __rte_unused,
+		    uint16_t vec __rte_unused)
+{
+	return 0;
+}
+
+static uint16_t
+virtio_user_set_queue_irq(struct virtio_crypto_hw *hw __rte_unused,
+			  struct virtqueue *vq __rte_unused,
+			  uint16_t vec)
+{
+	/* pretend we have done that */
+	return vec;
+}
+
+/* This function gets the queue size, i.e. the number of descriptors, of a
+ * specified queue. It differs from VHOST_USER_GET_QUEUE_NUM, which returns
+ * the maximum number of supported queues.
+ */
+static uint16_t
+virtio_user_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id __rte_unused)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	/* Currently, each queue has the same queue size */
+	return dev->queue_size;
+}
+
+static void
+virtio_user_setup_queue_packed(struct virtqueue *vq,
+			       struct virtio_user_dev *dev)
+{
+	uint16_t queue_idx = vq->vq_queue_index;
+	struct vring_packed *vring;
+	uint64_t desc_addr;
+	uint64_t avail_addr;
+	uint64_t used_addr;
+	uint16_t i;
+
+	vring  = &dev->vrings.packed[queue_idx];
+	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
+	avail_addr = desc_addr + vq->vq_nentries *
+		sizeof(struct vring_packed_desc);
+	used_addr = RTE_ALIGN_CEIL(avail_addr +
+			   sizeof(struct vring_packed_desc_event),
+			   VIRTIO_VRING_ALIGN);
+	vring->num = vq->vq_nentries;
+	vring->desc_iova = vq->vq_ring_mem;
+	vring->desc = (void *)(uintptr_t)desc_addr;
+	vring->driver = (void *)(uintptr_t)avail_addr;
+	vring->device = (void *)(uintptr_t)used_addr;
+	dev->packed_queues[queue_idx].avail_wrap_counter = true;
+	dev->packed_queues[queue_idx].used_wrap_counter = true;
+	dev->packed_queues[queue_idx].used_idx = 0;
+
+	for (i = 0; i < vring->num; i++)
+		vring->desc[i].flags = 0;
+}
+
+static void
+virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
+{
+	uint16_t queue_idx = vq->vq_queue_index;
+	uint64_t desc_addr, avail_addr, used_addr;
+
+	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
+	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+							 ring[vq->vq_nentries]),
+				   VIRTIO_VRING_ALIGN);
+
+	dev->vrings.split[queue_idx].num = vq->vq_nentries;
+	dev->vrings.split[queue_idx].desc_iova = vq->vq_ring_mem;
+	dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
+	dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
+	dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
+}
+
+static int
+virtio_user_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	if (vtpci_with_packed_queue(hw))
+		virtio_user_setup_queue_packed(vq, dev);
+	else
+		virtio_user_setup_queue_split(vq, dev);
+
+	if (dev->notify_area)
+		vq->notify_addr = dev->notify_area[vq->vq_queue_index];
+
+	if (virtcrypto_cq_to_vq(hw->cvq) == vq)
+		dev->scvq = virtcrypto_cq_to_vq(hw->cvq);
+
+	return 0;
+}
+
+static void
+virtio_user_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+	/* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port makes
+	 * QEMU stop the corresponding ioeventfds and reset the status of
+	 * the device.
+	 * For modern devices, setting queue desc, avail and used in the PCI
+	 * bar to 0 triggers no further behavior in QEMU.
+	 *
+	 * Here we just care about what information to deliver to vhost-user
+	 * or vhost-kernel, so we just close the ioeventfd for now.
+	 */
+
+	RTE_SET_USED(hw);
+	RTE_SET_USED(vq);
+}
+
+static void
+virtio_user_notify_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	uint64_t notify_data = 1;
+
+	if (!dev->notify_area) {
+		if (write(dev->kickfds[vq->vq_queue_index], &notify_data,
+			  sizeof(notify_data)) < 0)
+			PMD_DRV_LOG(ERR, "failed to kick backend: %s",
+				    strerror(errno));
+		return;
+	} else if (!vtpci_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
+		rte_write16(vq->vq_queue_index, vq->notify_addr);
+		return;
+	}
+
+	if (vtpci_with_packed_queue(hw)) {
+		/* Bit[0:15]: vq queue index
+		 * Bit[16:30]: avail index
+		 * Bit[31]: avail wrap counter
+		 */
+		notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags &
+				VRING_PACKED_DESC_F_AVAIL)) << 31) |
+				((uint32_t)vq->vq_avail_idx << 16) |
+				vq->vq_queue_index;
+	} else {
+		/* Bit[0:15]: vq queue index
+		 * Bit[16:31]: avail index
+		 */
+		notify_data = ((uint32_t)vq->vq_avail_idx << 16) |
+				vq->vq_queue_index;
+	}
+	rte_write32(notify_data, vq->notify_addr);
+}
+
+const struct virtio_pci_ops crypto_virtio_user_ops = {
+	.read_dev_cfg	= virtio_user_read_dev_config,
+	.write_dev_cfg	= virtio_user_write_dev_config,
+	.reset		= virtio_user_reset,
+	.get_status	= virtio_user_get_status,
+	.set_status	= virtio_user_set_status,
+	.get_features	= virtio_user_get_features,
+	.set_features	= virtio_user_set_features,
+	.get_isr	= virtio_user_get_isr,
+	.set_config_irq	= virtio_user_set_config_irq,
+	.set_queue_irq	= virtio_user_set_queue_irq,
+	.get_queue_num	= virtio_user_get_queue_num,
+	.setup_queue	= virtio_user_setup_queue,
+	.del_queue	= virtio_user_del_queue,
+	.notify_queue	= virtio_user_notify_queue,
+};
+
+static const char * const valid_args[] = {
+#define VIRTIO_USER_ARG_QUEUES_NUM     "queues"
+	VIRTIO_USER_ARG_QUEUES_NUM,
+#define VIRTIO_USER_ARG_QUEUE_SIZE     "queue_size"
+	VIRTIO_USER_ARG_QUEUE_SIZE,
+#define VIRTIO_USER_ARG_PATH           "path"
+	VIRTIO_USER_ARG_PATH,
+#define VIRTIO_USER_ARG_SERVER_MODE    "server"
+	VIRTIO_USER_ARG_SERVER_MODE,
+	NULL
+};
+
+#define VIRTIO_USER_DEF_Q_NUM	1
+#define VIRTIO_USER_DEF_Q_SZ	256
+#define VIRTIO_USER_DEF_SERVER_MODE	0
+
+static int
+get_string_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	if (!value || !extra_args)
+		return -EINVAL;
+
+	*(char **)extra_args = strdup(value);
+
+	if (!*(char **)extra_args)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int
+get_integer_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	uint64_t integer = 0;
+	if (!value || !extra_args)
+		return -EINVAL;
+	errno = 0;
+	integer = strtoull(value, NULL, 0);
+	/* extra_args keeps default value, it should be replaced
+	 * only in case of successful parsing of the 'value' arg
+	 */
+	if (errno == 0)
+		*(uint64_t *)extra_args = integer;
+	return -errno;
+}
+
+static struct rte_cryptodev *
+virtio_user_cryptodev_alloc(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.private_data_size = sizeof(struct virtio_user_dev),
+	};
+	struct rte_cryptodev_data *data;
+	struct rte_cryptodev *cryptodev;
+	struct virtio_user_dev *dev;
+	struct virtio_crypto_hw *hw;
+
+	init_params.socket_id = vdev->device.numa_node;
+	init_params.private_data_size = sizeof(struct virtio_user_dev);
+	cryptodev = rte_cryptodev_pmd_create(vdev->device.name, &vdev->device, &init_params);
+	if (cryptodev == NULL) {
+		PMD_INIT_LOG(ERR, "failed to create cryptodev vdev");
+		return NULL;
+	}
+
+	data = cryptodev->data;
+	dev = data->dev_private;
+	hw = &dev->hw;
+
+	hw->dev_id = data->dev_id;
+	VTPCI_OPS(hw) = &crypto_virtio_user_ops;
+
+	return cryptodev;
+}
+
+static void
+virtio_user_cryptodev_free(struct rte_cryptodev *cryptodev)
+{
+	rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static int
+virtio_user_pmd_probe(struct rte_vdev_device *vdev)
+{
+	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
+	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
+	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
+	struct rte_cryptodev *cryptodev = NULL;
+	struct rte_kvargs *kvlist = NULL;
+	struct virtio_user_dev *dev;
+	char *path = NULL;
+	int ret = -1;
+
+	kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_args);
+
+	if (!kvlist) {
+		PMD_INIT_LOG(ERR, "error when parsing param");
+		goto end;
+	}
+
+	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
+		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
+					&get_string_arg, &path) < 0) {
+			PMD_INIT_LOG(ERR, "error to parse %s",
+					VIRTIO_USER_ARG_PATH);
+			goto end;
+		}
+	} else {
+		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
+				VIRTIO_USER_ARG_PATH);
+		goto end;
+	}
+
+	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
+		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
+					&get_integer_arg, &queues) < 0) {
+			PMD_INIT_LOG(ERR, "error to parse %s",
+					VIRTIO_USER_ARG_QUEUES_NUM);
+			goto end;
+		}
+	}
+
+	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
+		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
+					&get_integer_arg, &queue_size) < 0) {
+			PMD_INIT_LOG(ERR, "error to parse %s",
+					VIRTIO_USER_ARG_QUEUE_SIZE);
+			goto end;
+		}
+	}
+
+	cryptodev = virtio_user_cryptodev_alloc(vdev);
+	if (!cryptodev) {
+		PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
+		goto end;
+	}
+
+	dev = cryptodev->data->dev_private;
+	if (crypto_virtio_user_dev_init(dev, path, queues, queue_size,
+			server_mode) < 0) {
+		PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
+		virtio_user_cryptodev_free(cryptodev);
+		goto end;
+	}
+
+	if (crypto_virtio_dev_init(cryptodev, VIRTIO_USER_CRYPTO_PMD_GUEST_FEATURES,
+			NULL) < 0) {
+		PMD_INIT_LOG(ERR, "crypto_virtio_dev_init fails");
+		crypto_virtio_user_dev_uninit(dev);
+		virtio_user_cryptodev_free(cryptodev);
+		goto end;
+	}
+
+	rte_cryptodev_pmd_probing_finish(cryptodev);
+
+	ret = 0;
+end:
+	rte_kvargs_free(kvlist);
+	free(path);
+	return ret;
+}
+
+static int
+virtio_user_pmd_remove(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev *cryptodev;
+	const char *name;
+	int devid;
+
+	if (!vdev)
+		return -EINVAL;
+
+	name = rte_vdev_device_name(vdev);
+	PMD_DRV_LOG(INFO, "Removing %s", name);
+
+	devid = rte_cryptodev_get_dev_id(name);
+	if (devid < 0)
+		return -EINVAL;
+
+	rte_cryptodev_stop(devid);
+
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	if (rte_cryptodev_pmd_destroy(cryptodev) < 0) {
+		PMD_DRV_LOG(ERR, "Failed to remove %s", name);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr,
+		uint64_t iova, size_t len)
+{
+	struct rte_cryptodev *cryptodev;
+	struct virtio_user_dev *dev;
+	const char *name;
+
+	if (!vdev)
+		return -EINVAL;
+
+	name = rte_vdev_device_name(vdev);
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -EINVAL;
+
+	dev = cryptodev->data->dev_private;
+
+	if (dev->ops->dma_map)
+		return dev->ops->dma_map(dev, addr, iova, len);
+
+	return 0;
+}
+
+static int virtio_user_pmd_dma_unmap(struct rte_vdev_device *vdev, void *addr,
+		uint64_t iova, size_t len)
+{
+	struct rte_cryptodev *cryptodev;
+	struct virtio_user_dev *dev;
+	const char *name;
+
+	if (!vdev)
+		return -EINVAL;
+
+	name = rte_vdev_device_name(vdev);
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -EINVAL;
+
+	dev = cryptodev->data->dev_private;
+
+	if (dev->ops->dma_unmap)
+		return dev->ops->dma_unmap(dev, addr, iova, len);
+
+	return 0;
+}
+
+static struct rte_vdev_driver virtio_user_driver = {
+	.probe = virtio_user_pmd_probe,
+	.remove = virtio_user_pmd_remove,
+	.dma_map = virtio_user_pmd_dma_map,
+	.dma_unmap = virtio_user_pmd_dma_unmap,
+};
+
+static struct cryptodev_driver virtio_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(crypto_virtio_user, virtio_user_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
+	virtio_user_driver.driver,
+	cryptodev_virtio_driver_id);
+RTE_PMD_REGISTER_ALIAS(crypto_virtio_user, crypto_virtio);
+RTE_PMD_REGISTER_PARAM_STRING(crypto_virtio_user,
+	"path=<path> "
+	"queues=<int> "
+	"queue_size=<int>");
-- 
2.25.1


^ permalink raw reply	[relevance 1%]

* [PATCH] ring: add the second version of the RTS interface
@ 2025-01-05  9:57  5% Huichao Cai
  2025-01-05 15:13  5% ` [PATCH v2] " Huichao Cai
  0 siblings, 1 reply; 169+ results
From: Huichao Cai @ 2025-01-05  9:57 UTC (permalink / raw)
  To: honnappa.nagarahalli, konstantin.v.ananyev; +Cc: dev

In the RTS mode, the enqueue/dequeue tail can only be updated by the
last enqueue/dequeue in flight, which reduces concurrency. This patch
adds a V2 version of the RTS interface, in which the tail update is no
longer tied to the last enqueue/dequeue, allowing the tail to be
advanced promptly and concurrency to be increased.

Add some corresponding test cases.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 app/test/meson.build                   |   1 +
 app/test/test_ring.c                   |  26 +++
 app/test/test_ring_rts_v2_stress.c     |  32 ++++
 app/test/test_ring_stress.c            |   3 +
 app/test/test_ring_stress.h            |   1 +
 devtools/libabigail.abignore           |   3 +
 doc/guides/rel_notes/release_25_03.rst |   2 +
 lib/ring/rte_ring.c                    |  53 +++++-
 lib/ring/rte_ring.h                    |  12 ++
 lib/ring/rte_ring_core.h               |   9 ++
 lib/ring/rte_ring_elem.h               |  18 +++
 lib/ring/rte_ring_rts.h                | 216 ++++++++++++++++++++++++-
 lib/ring/rte_ring_rts_elem_pvt.h       | 168 +++++++++++++++++++
 13 files changed, 534 insertions(+), 10 deletions(-)
 create mode 100644 app/test/test_ring_rts_v2_stress.c

diff --git a/app/test/meson.build b/app/test/meson.build
index d5cb6a7f7a..e3d8cef3fa 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -166,6 +166,7 @@ source_file_deps = {
     'test_ring_mt_peek_stress_zc.c': ['ptr_compress'],
     'test_ring_perf.c': ['ptr_compress'],
     'test_ring_rts_stress.c': ['ptr_compress'],
+    'test_ring_rts_v2_stress.c': ['ptr_compress'],
     'test_ring_st_peek_stress.c': ['ptr_compress'],
     'test_ring_st_peek_stress_zc.c': ['ptr_compress'],
     'test_ring_stress.c': ['ptr_compress'],
diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index ba1fec1de3..094f14b859 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -284,6 +284,19 @@ static const struct {
 			.felem = rte_ring_dequeue_bulk_elem,
 		},
 	},
+	{
+		.desc = "MP_RTS/MC_RTS V2 sync mode",
+		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
+		.create_flags = RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ,
+		.enq = {
+			.flegacy = rte_ring_enqueue_bulk,
+			.felem = rte_ring_enqueue_bulk_elem,
+		},
+		.deq = {
+			.flegacy = rte_ring_dequeue_bulk,
+			.felem = rte_ring_dequeue_bulk_elem,
+		},
+	},
 	{
 		.desc = "MP_HTS/MC_HTS sync mode",
 		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
@@ -349,6 +362,19 @@ static const struct {
 			.felem = rte_ring_dequeue_burst_elem,
 		},
 	},
+	{
+		.desc = "MP_RTS/MC_RTS V2 sync mode",
+		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
+		.create_flags = RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ,
+		.enq = {
+			.flegacy = rte_ring_enqueue_burst,
+			.felem = rte_ring_enqueue_burst_elem,
+		},
+		.deq = {
+			.flegacy = rte_ring_dequeue_burst,
+			.felem = rte_ring_dequeue_burst_elem,
+		},
+	},
 	{
 		.desc = "MP_HTS/MC_HTS sync mode",
 		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
diff --git a/app/test/test_ring_rts_v2_stress.c b/app/test/test_ring_rts_v2_stress.c
new file mode 100644
index 0000000000..6079366a7d
--- /dev/null
+++ b/app/test/test_ring_rts_v2_stress.c
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include "test_ring_stress_impl.h"
+
+static inline uint32_t
+_st_ring_dequeue_bulk(struct rte_ring *r, void **obj, uint32_t n,
+	uint32_t *avail)
+{
+	return rte_ring_mc_rts_v2_dequeue_bulk(r, obj, n, avail);
+}
+
+static inline uint32_t
+_st_ring_enqueue_bulk(struct rte_ring *r, void * const *obj, uint32_t n,
+	uint32_t *free)
+{
+	return rte_ring_mp_rts_v2_enqueue_bulk(r, obj, n, free);
+}
+
+static int
+_st_ring_init(struct rte_ring *r, const char *name, uint32_t num)
+{
+	return rte_ring_init(r, name, num,
+		RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ);
+}
+
+const struct test test_ring_rts_v2_stress = {
+	.name = "MT_RTS_V2",
+	.nb_case = RTE_DIM(tests),
+	.cases = tests,
+};
diff --git a/app/test/test_ring_stress.c b/app/test/test_ring_stress.c
index 1af45e0fc8..94085acd5e 100644
--- a/app/test/test_ring_stress.c
+++ b/app/test/test_ring_stress.c
@@ -43,6 +43,9 @@ test_ring_stress(void)
 	n += test_ring_rts_stress.nb_case;
 	k += run_test(&test_ring_rts_stress);
 
+	n += test_ring_rts_v2_stress.nb_case;
+	k += run_test(&test_ring_rts_v2_stress);
+
 	n += test_ring_hts_stress.nb_case;
 	k += run_test(&test_ring_hts_stress);
 
diff --git a/app/test/test_ring_stress.h b/app/test/test_ring_stress.h
index 416d68c9a0..505957f6fb 100644
--- a/app/test/test_ring_stress.h
+++ b/app/test/test_ring_stress.h
@@ -34,6 +34,7 @@ struct test {
 
 extern const struct test test_ring_mpmc_stress;
 extern const struct test test_ring_rts_stress;
+extern const struct test test_ring_rts_v2_stress;
 extern const struct test test_ring_hts_stress;
 extern const struct test test_ring_mt_peek_stress;
 extern const struct test test_ring_mt_peek_stress_zc;
diff --git a/devtools/libabigail.abignore b/devtools/libabigail.abignore
index 21b8cd6113..0a0f305acb 100644
--- a/devtools/libabigail.abignore
+++ b/devtools/libabigail.abignore
@@ -33,3 +33,6 @@
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Temporary exceptions till next major ABI version ;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+[suppress_type]
+       name = rte_ring_rts_headtail
+       has_data_member_inserted_between = {offset_of(head), end}
diff --git a/doc/guides/rel_notes/release_25_03.rst b/doc/guides/rel_notes/release_25_03.rst
index 426dfcd982..f73bc9e397 100644
--- a/doc/guides/rel_notes/release_25_03.rst
+++ b/doc/guides/rel_notes/release_25_03.rst
@@ -102,6 +102,8 @@ ABI Changes
 
 * No ABI change that would break compatibility with 24.11.
 
+* ring: Added ``rte_ring_rts_cache`` structure and ``rts_cache`` field to the
+  ``rte_ring_rts_headtail`` structure.
 
 Known Issues
 ------------
diff --git a/lib/ring/rte_ring.c b/lib/ring/rte_ring.c
index aebb6d6728..df84592300 100644
--- a/lib/ring/rte_ring.c
+++ b/lib/ring/rte_ring.c
@@ -43,7 +43,8 @@ EAL_REGISTER_TAILQ(rte_ring_tailq)
 /* mask of all valid flag values to ring_create() */
 #define RING_F_MASK (RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ | \
 		     RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ |	       \
-		     RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ)
+		     RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ |	       \
+		     RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ)
 
 /* true if x is a power of 2 */
 #define POWEROF2(x) ((((x)-1) & (x)) == 0)
@@ -106,6 +107,7 @@ reset_headtail(void *p)
 		ht->tail = 0;
 		break;
 	case RTE_RING_SYNC_MT_RTS:
+	case RTE_RING_SYNC_MT_RTS_V2:
 		ht_rts->head.raw = 0;
 		ht_rts->tail.raw = 0;
 		break;
@@ -135,9 +137,11 @@ get_sync_type(uint32_t flags, enum rte_ring_sync_type *prod_st,
 	enum rte_ring_sync_type *cons_st)
 {
 	static const uint32_t prod_st_flags =
-		(RING_F_SP_ENQ | RING_F_MP_RTS_ENQ | RING_F_MP_HTS_ENQ);
+		(RING_F_SP_ENQ | RING_F_MP_RTS_ENQ | RING_F_MP_HTS_ENQ |
+		RING_F_MP_RTS_V2_ENQ);
 	static const uint32_t cons_st_flags =
-		(RING_F_SC_DEQ | RING_F_MC_RTS_DEQ | RING_F_MC_HTS_DEQ);
+		(RING_F_SC_DEQ | RING_F_MC_RTS_DEQ | RING_F_MC_HTS_DEQ |
+		RING_F_MC_RTS_V2_DEQ);
 
 	switch (flags & prod_st_flags) {
 	case 0:
@@ -152,6 +156,9 @@ get_sync_type(uint32_t flags, enum rte_ring_sync_type *prod_st,
 	case RING_F_MP_HTS_ENQ:
 		*prod_st = RTE_RING_SYNC_MT_HTS;
 		break;
+	case RING_F_MP_RTS_V2_ENQ:
+		*prod_st = RTE_RING_SYNC_MT_RTS_V2;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -169,6 +176,9 @@ get_sync_type(uint32_t flags, enum rte_ring_sync_type *prod_st,
 	case RING_F_MC_HTS_DEQ:
 		*cons_st = RTE_RING_SYNC_MT_HTS;
 		break;
+	case RING_F_MC_RTS_V2_DEQ:
+		*cons_st = RTE_RING_SYNC_MT_RTS_V2;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -239,6 +249,28 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
 	if (flags & RING_F_MC_RTS_DEQ)
 		rte_ring_set_cons_htd_max(r, r->capacity / HTD_MAX_DEF);
 
+	/* set default values for head-tail distance and allocate memory to cache */
+	if (flags & RING_F_MP_RTS_V2_ENQ) {
+		rte_ring_set_prod_htd_max(r, r->capacity / HTD_MAX_DEF);
+		r->rts_prod.rts_cache = (struct rte_ring_rts_cache *)rte_zmalloc(
+			"RTS_PROD_CACHE", sizeof(struct rte_ring_rts_cache) * r->size, 0);
+		if (r->rts_prod.rts_cache == NULL) {
+			RING_LOG(ERR, "Cannot reserve memory for rts prod cache");
+			return -ENOMEM;
+		}
+	}
+	if (flags & RING_F_MC_RTS_V2_DEQ) {
+		rte_ring_set_cons_htd_max(r, r->capacity / HTD_MAX_DEF);
+		r->rts_cons.rts_cache = (struct rte_ring_rts_cache *)rte_zmalloc(
+			"RTS_CONS_CACHE", sizeof(struct rte_ring_rts_cache) * r->size, 0);
+		if (r->rts_cons.rts_cache == NULL) {
+			if (flags & RING_F_MP_RTS_V2_ENQ)
+				rte_free(r->rts_prod.rts_cache);
+			RING_LOG(ERR, "Cannot reserve memory for rts cons cache");
+			return -ENOMEM;
+		}
+	}
+
 	return 0;
 }
 
@@ -293,9 +325,13 @@ rte_ring_create_elem(const char *name, unsigned int esize, unsigned int count,
 					 mz_flags, alignof(typeof(*r)));
 	if (mz != NULL) {
 		r = mz->addr;
-		/* no need to check return value here, we already checked the
-		 * arguments above */
-		rte_ring_init(r, name, requested_count, flags);
+
+		if (rte_ring_init(r, name, requested_count, flags)) {
+			rte_free(te);
+			if (rte_memzone_free(mz) != 0)
+				RING_LOG(ERR, "Cannot free memory for ring");
+			return NULL;
+		}
 
 		te->data = (void *) r;
 		r->memzone = mz;
@@ -358,6 +394,11 @@ rte_ring_free(struct rte_ring *r)
 
 	rte_mcfg_tailq_write_unlock();
 
+	if (r->flags & RING_F_MP_RTS_V2_ENQ)
+		rte_free(r->rts_prod.rts_cache);
+	if (r->flags & RING_F_MC_RTS_V2_DEQ)
+		rte_free(r->rts_cons.rts_cache);
+
 	if (rte_memzone_free(r->memzone) != 0)
 		RING_LOG(ERR, "Cannot free memory");
 
diff --git a/lib/ring/rte_ring.h b/lib/ring/rte_ring.h
index 11ca69c73d..2b35ce038e 100644
--- a/lib/ring/rte_ring.h
+++ b/lib/ring/rte_ring.h
@@ -89,6 +89,9 @@ ssize_t rte_ring_get_memsize(unsigned int count);
  *      - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer RTS mode".
+ *      - RING_F_MP_RTS_V2_ENQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *        is "multi-producer RTS V2 mode".
  *      - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer HTS mode".
@@ -101,6 +104,9 @@ ssize_t rte_ring_get_memsize(unsigned int count);
  *      - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer RTS mode".
+ *      - RING_F_MC_RTS_V2_DEQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *        is "multi-consumer RTS V2 mode".
  *      - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer HTS mode".
@@ -149,6 +155,9 @@ int rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
  *      - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer RTS mode".
+ *      - RING_F_MP_RTS_V2_ENQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *        is "multi-producer RTS V2 mode".
  *      - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer HTS mode".
@@ -161,6 +170,9 @@ int rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
  *      - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer RTS mode".
+ *      - RING_F_MC_RTS_V2_DEQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *        is "multi-consumer RTS V2 mode".
  *      - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer HTS mode".
diff --git a/lib/ring/rte_ring_core.h b/lib/ring/rte_ring_core.h
index 6cd6ce9884..9e627d26c1 100644
--- a/lib/ring/rte_ring_core.h
+++ b/lib/ring/rte_ring_core.h
@@ -55,6 +55,7 @@ enum rte_ring_sync_type {
 	RTE_RING_SYNC_ST,     /**< single thread only */
 	RTE_RING_SYNC_MT_RTS, /**< multi-thread relaxed tail sync */
 	RTE_RING_SYNC_MT_HTS, /**< multi-thread head/tail sync */
+	RTE_RING_SYNC_MT_RTS_V2, /**< multi-thread relaxed tail sync v2 */
 };
 
 /**
@@ -82,11 +83,16 @@ union __rte_ring_rts_poscnt {
 	} val;
 };
 
+struct rte_ring_rts_cache {
+	volatile RTE_ATOMIC(uint32_t) num;      /**< Number of objs. */
+};
+
 struct rte_ring_rts_headtail {
 	volatile union __rte_ring_rts_poscnt tail;
 	enum rte_ring_sync_type sync_type;  /**< sync type of prod/cons */
 	uint32_t htd_max;   /**< max allowed distance between head/tail */
 	volatile union __rte_ring_rts_poscnt head;
+	struct rte_ring_rts_cache *rts_cache; /**< cache of deferred tail updates */
 };
 
 union __rte_ring_hts_pos {
@@ -163,4 +169,7 @@ struct rte_ring {
 #define RING_F_MP_HTS_ENQ 0x0020 /**< The default enqueue is "MP HTS". */
 #define RING_F_MC_HTS_DEQ 0x0040 /**< The default dequeue is "MC HTS". */
 
+#define RING_F_MP_RTS_V2_ENQ 0x0080 /**< The default enqueue is "MP RTS V2". */
+#define RING_F_MC_RTS_V2_DEQ 0x0100 /**< The default dequeue is "MC RTS V2". */
+
 #endif /* _RTE_RING_CORE_H_ */
diff --git a/lib/ring/rte_ring_elem.h b/lib/ring/rte_ring_elem.h
index b96bfc003f..1352709f94 100644
--- a/lib/ring/rte_ring_elem.h
+++ b/lib/ring/rte_ring_elem.h
@@ -71,6 +71,9 @@ ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
  *      - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer RTS mode".
+ *      - RING_F_MP_RTS_V2_ENQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *        is "multi-producer RTS V2 mode".
  *      - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer HTS mode".
@@ -83,6 +86,9 @@ ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
  *      - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer RTS mode".
+ *      - RING_F_MC_RTS_V2_DEQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *        is "multi-consumer RTS V2 mode".
  *      - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer HTS mode".
@@ -203,6 +209,9 @@ rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
 			free_space);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mp_rts_v2_enqueue_bulk_elem(r, obj_table, esize, n,
+			free_space);
 	}
 
 	/* valid ring should never reach this point */
@@ -385,6 +394,9 @@ rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
 			n, available);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mc_rts_v2_dequeue_bulk_elem(r, obj_table, esize,
+			n, available);
 	}
 
 	/* valid ring should never reach this point */
@@ -571,6 +583,9 @@ rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
 			n, free_space);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mp_rts_v2_enqueue_burst_elem(r, obj_table, esize,
+			n, free_space);
 	}
 
 	/* valid ring should never reach this point */
@@ -681,6 +696,9 @@ rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
 			n, available);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mc_rts_v2_dequeue_burst_elem(r, obj_table, esize,
+			n, available);
 	}
 
 	/* valid ring should never reach this point */
diff --git a/lib/ring/rte_ring_rts.h b/lib/ring/rte_ring_rts.h
index d7a3863c83..b47e400452 100644
--- a/lib/ring/rte_ring_rts.h
+++ b/lib/ring/rte_ring_rts.h
@@ -84,6 +84,33 @@ rte_ring_mp_rts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
 			RTE_RING_QUEUE_FIXED, free_space);
 }
 
+/**
+ * Enqueue several objects on the RTS V2 ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   The number of objects enqueued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *free_space)
+{
+	return __rte_ring_do_rts_v2_enqueue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_FIXED, free_space);
+}
+
 /**
  * Dequeue several objects from an RTS ring (multi-consumers safe).
  *
@@ -111,6 +138,33 @@ rte_ring_mc_rts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
 			RTE_RING_QUEUE_FIXED, available);
 }
 
+/**
+ * Dequeue several objects from an RTS V2 ring (multi-consumers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects that will be filled.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   The number of objects dequeued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *available)
+{
+	return __rte_ring_do_rts_v2_dequeue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_FIXED, available);
+}
+
 /**
  * Enqueue several objects on the RTS ring (multi-producers safe).
  *
@@ -138,6 +192,33 @@ rte_ring_mp_rts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
 			RTE_RING_QUEUE_VARIABLE, free_space);
 }
 
+/**
+ * Enqueue several objects on the RTS V2 ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   - n: Actual number of objects enqueued.
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *free_space)
+{
+	return __rte_ring_do_rts_v2_enqueue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_VARIABLE, free_space);
+}
+
 /**
  * Dequeue several objects from an RTS  ring (multi-consumers safe).
  * When the requested objects are more than the available objects,
@@ -167,6 +248,35 @@ rte_ring_mc_rts_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
 			RTE_RING_QUEUE_VARIABLE, available);
 }
 
+/**
+ * Dequeue several objects from an RTS V2 ring (multi-consumers safe).
+ * When the requested objects are more than the available objects,
+ * only dequeue the actual number of objects.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects that will be filled.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   - n: Actual number of objects dequeued, 0 if ring is empty
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *available)
+{
+	return __rte_ring_do_rts_v2_dequeue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_VARIABLE, available);
+}
+
 /**
  * Enqueue several objects on the RTS ring (multi-producers safe).
  *
@@ -213,6 +323,52 @@ rte_ring_mc_rts_dequeue_bulk(struct rte_ring *r, void **obj_table,
 			sizeof(uintptr_t), n, available);
 }
 
+/**
+ * Enqueue several objects on the RTS V2 ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   The number of objects enqueued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+			 unsigned int n, unsigned int *free_space)
+{
+	return rte_ring_mp_rts_v2_enqueue_bulk_elem(r, obj_table,
+			sizeof(uintptr_t), n, free_space);
+}
+
+/**
+ * Dequeue several objects from an RTS V2 ring (multi-consumers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   The number of objects dequeued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_bulk(struct rte_ring *r, void **obj_table,
+		unsigned int n, unsigned int *available)
+{
+	return rte_ring_mc_rts_v2_dequeue_bulk_elem(r, obj_table,
+			sizeof(uintptr_t), n, available);
+}
+
 /**
  * Enqueue several objects on the RTS ring (multi-producers safe).
  *
@@ -261,6 +417,54 @@ rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table,
 			sizeof(uintptr_t), n, available);
 }
 
+/**
+ * Enqueue several objects on the RTS V2 ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   - n: Actual number of objects enqueued.
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_burst(struct rte_ring *r, void * const *obj_table,
+			 unsigned int n, unsigned int *free_space)
+{
+	return rte_ring_mp_rts_v2_enqueue_burst_elem(r, obj_table,
+			sizeof(uintptr_t), n, free_space);
+}
+
+/**
+ * Dequeue several objects from an RTS V2 ring (multi-consumers safe).
+ * When the requested objects are more than the available objects,
+ * only dequeue the actual number of objects.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   - n: Actual number of objects dequeued, 0 if ring is empty
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_burst(struct rte_ring *r, void **obj_table,
+		unsigned int n, unsigned int *available)
+{
+	return rte_ring_mc_rts_v2_dequeue_burst_elem(r, obj_table,
+			sizeof(uintptr_t), n, available);
+}
+
 /**
  * Return producer max Head-Tail-Distance (HTD).
  *
@@ -273,7 +477,8 @@ rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table,
 static inline uint32_t
 rte_ring_get_prod_htd_max(const struct rte_ring *r)
 {
-	if (r->prod.sync_type == RTE_RING_SYNC_MT_RTS)
+	if ((r->prod.sync_type == RTE_RING_SYNC_MT_RTS) ||
+			(r->prod.sync_type == RTE_RING_SYNC_MT_RTS_V2))
 		return r->rts_prod.htd_max;
 	return UINT32_MAX;
 }
@@ -292,7 +497,8 @@ rte_ring_get_prod_htd_max(const struct rte_ring *r)
 static inline int
 rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v)
 {
-	if (r->prod.sync_type != RTE_RING_SYNC_MT_RTS)
+	if ((r->prod.sync_type != RTE_RING_SYNC_MT_RTS) &&
+			(r->prod.sync_type != RTE_RING_SYNC_MT_RTS_V2))
 		return -ENOTSUP;
 
 	r->rts_prod.htd_max = v;
@@ -311,7 +517,8 @@ rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v)
 static inline uint32_t
 rte_ring_get_cons_htd_max(const struct rte_ring *r)
 {
-	if (r->cons.sync_type == RTE_RING_SYNC_MT_RTS)
+	if ((r->cons.sync_type == RTE_RING_SYNC_MT_RTS) ||
+			(r->cons.sync_type == RTE_RING_SYNC_MT_RTS_V2))
 		return r->rts_cons.htd_max;
 	return UINT32_MAX;
 }
@@ -330,7 +537,8 @@ rte_ring_get_cons_htd_max(const struct rte_ring *r)
 static inline int
 rte_ring_set_cons_htd_max(struct rte_ring *r, uint32_t v)
 {
-	if (r->cons.sync_type != RTE_RING_SYNC_MT_RTS)
+	if ((r->cons.sync_type != RTE_RING_SYNC_MT_RTS) &&
+			(r->cons.sync_type != RTE_RING_SYNC_MT_RTS_V2))
 		return -ENOTSUP;
 
 	r->rts_cons.htd_max = v;
diff --git a/lib/ring/rte_ring_rts_elem_pvt.h b/lib/ring/rte_ring_rts_elem_pvt.h
index 122650346b..4ce22a93ed 100644
--- a/lib/ring/rte_ring_rts_elem_pvt.h
+++ b/lib/ring/rte_ring_rts_elem_pvt.h
@@ -46,6 +46,92 @@ __rte_ring_rts_update_tail(struct rte_ring_rts_headtail *ht)
 			rte_memory_order_release, rte_memory_order_acquire) == 0);
 }
 
+/**
+ * @file rte_ring_rts_elem_pvt.h
+ * It is not recommended to include this file directly,
+ * include <rte_ring.h> instead.
+ * Contains internal helper functions for Relaxed Tail Sync (RTS) ring mode.
+ * For more information please refer to <rte_ring_rts.h>.
+ */
+
+/**
+ * @internal This function updates the tail value for RTS V2 rings.
+ */
+static __rte_always_inline void
+__rte_ring_rts_v2_update_tail(struct rte_ring_rts_headtail *ht,
+	uint32_t old_tail, uint32_t num, uint32_t mask)
+{
+	union __rte_ring_rts_poscnt ot, nt;
+
+	ot.val.cnt = nt.val.cnt = 0;
+	ot.val.pos = old_tail;
+	nt.val.pos = old_tail + num;
+
+	/*
+	 * If the tail still equals this operation's old position, update it
+	 * to the new value and keep advancing it while the corresponding
+	 * cache slots hold non-zero counts; otherwise record this
+	 * operation's count in the cache for a later thread to apply.
+	 */
+
+	if (rte_atomic_compare_exchange_strong_explicit(&ht->tail.raw,
+				(uint64_t *)(uintptr_t)&ot.raw, nt.raw,
+				rte_memory_order_release, rte_memory_order_acquire) == 0) {
+		ot.val.pos = old_tail;
+
+		/*
+		 * Record the count of this enqueue/dequeue in the
+		 * corresponding cache slot.
+		 */
+		rte_atomic_store_explicit(&ht->rts_cache[ot.val.pos & mask].num,
+			num, rte_memory_order_release);
+
+		/*
+		 * Another enqueue/dequeue may be competing to update the
+		 * tail. The winner keeps trying to advance the tail; the
+		 * loser exits.
+		 */
+		if (rte_atomic_compare_exchange_strong_explicit(&ht->tail.raw,
+					(uint64_t *)(uintptr_t)&ot.raw, nt.raw,
+					rte_memory_order_release, rte_memory_order_acquire) == 0)
+			return;
+
+		/*
+		 * Reset the corresponding cache slot to 0 for the next use.
+		 */
+		rte_atomic_store_explicit(&ht->rts_cache[ot.val.pos & mask].num,
+			0, rte_memory_order_release);
+	}
+
+	/*
+	 * Keep advancing the tail while the corresponding cache slot holds a
+	 * non-zero count. Reaching this point means this thread is updating
+	 * the tail on behalf of other enqueues/dequeues that lost the race.
+	 */
+	while (1) {
+		num = rte_atomic_load_explicit(&ht->rts_cache[nt.val.pos & mask].num,
+			rte_memory_order_acquire);
+		if (num == 0)
+			break;
+
+		ot.val.pos = nt.val.pos;
+		nt.val.pos += num;
+
+		/*
+		 * Another enqueue/dequeue may be competing to update the
+		 * tail. The winner keeps trying to advance the tail; the
+		 * loser exits.
+		 */
+		if (rte_atomic_compare_exchange_strong_explicit(&ht->tail.raw,
+					(uint64_t *)(uintptr_t)&ot.raw, nt.raw,
+					rte_memory_order_release, rte_memory_order_acquire) == 0)
+			return;
+
+		rte_atomic_store_explicit(&ht->rts_cache[ot.val.pos & mask].num,
+			0, rte_memory_order_release);
+	}
+}
+
 /**
  * @internal This function waits till head/tail distance wouldn't
  * exceed pre-defined max value.
@@ -218,6 +304,47 @@ __rte_ring_do_rts_enqueue_elem(struct rte_ring *r, const void *obj_table,
 	return n;
 }
 
+/**
+ * @internal Enqueue several objects on the RTS V2 ring.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param behavior
+ *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring
+ *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param free_space
+ *   returns the amount of space after the enqueue operation has finished
+ * @return
+ *   Actual number of objects enqueued.
+ *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_rts_v2_enqueue_elem(struct rte_ring *r, const void *obj_table,
+	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
+	uint32_t *free_space)
+{
+	uint32_t free, head;
+
+	n = __rte_ring_rts_move_prod_head(r, n, behavior, &head, &free);
+
+	if (n != 0) {
+		__rte_ring_enqueue_elems(r, head, obj_table, esize, n);
+		__rte_ring_rts_v2_update_tail(&r->rts_prod, head, n, r->mask);
+	}
+
+	if (free_space != NULL)
+		*free_space = free - n;
+	return n;
+}
+
 /**
  * @internal Dequeue several objects from the RTS ring.
  *
@@ -259,4 +386,45 @@ __rte_ring_do_rts_dequeue_elem(struct rte_ring *r, void *obj_table,
 	return n;
 }
 
+/**
+ * @internal Dequeue several objects from the RTS V2 ring.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to pull from the ring.
+ * @param behavior
+ *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring
+ *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param available
+ *   returns the number of remaining ring entries after the dequeue has finished
+ * @return
+ *   - Actual number of objects dequeued.
+ *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_rts_v2_dequeue_elem(struct rte_ring *r, void *obj_table,
+	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
+	uint32_t *available)
+{
+	uint32_t entries, head;
+
+	n = __rte_ring_rts_move_cons_head(r, n, behavior, &head, &entries);
+
+	if (n != 0) {
+		__rte_ring_dequeue_elems(r, head, obj_table, esize, n);
+		__rte_ring_rts_v2_update_tail(&r->rts_cons, head, n, r->mask);
+	}
+
+	if (available != NULL)
+		*available = entries - n;
+	return n;
+}
+
 #endif /* _RTE_RING_RTS_ELEM_PVT_H_ */
-- 
2.27.0


^ permalink raw reply	[relevance 5%]

* [PATCH v2] ring: add the second version of the RTS interface
@ 2025-01-05 15:09  5% Huichao Cai
  0 siblings, 0 replies; 169+ results
From: Huichao Cai @ 2025-01-05 15:09 UTC (permalink / raw)
  To: honnappa.nagarahalli, konstantin.v.ananyev, thomas; +Cc: dev

In the existing RTS mode, the enqueue/dequeue tail can only be
updated by the last outstanding enqueue/dequeue, which limits
concurrency. This patch adds a V2 version of the RTS interface in
which the tail update is no longer tied to the last enqueue/dequeue,
so the tail can be advanced promptly and concurrency increases.

Add some corresponding test cases.
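
For illustration only, a minimal usage sketch (not part of this
patch): it relies on the flags introduced below plus the existing
generic ring APIs, and the ring name and sizes are arbitrary. An
application opts in at ring creation time:

	#include <rte_ring.h>

	static int
	rts_v2_demo(void)
	{
		void *objs[32] = { NULL };
		unsigned int n;

		/* The V2 flags select the RTE_RING_SYNC_MT_RTS_V2 sync
		 * type for both the producer and the consumer side. */
		struct rte_ring *r = rte_ring_create("rts_v2_demo", 1024,
			SOCKET_ID_ANY,
			RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ);
		if (r == NULL)
			return -1;

		/* The generic APIs dispatch to the V2 paths. */
		n = rte_ring_enqueue_burst(r, objs, RTE_DIM(objs), NULL);
		n = rte_ring_dequeue_burst(r, objs, n, NULL);
		(void)n;

		rte_ring_free(r);
		return 0;
	}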

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 app/test/meson.build                   |   1 +
 app/test/test_ring.c                   |  26 +++
 app/test/test_ring_rts_v2_stress.c     |  32 ++++
 app/test/test_ring_stress.c            |   3 +
 app/test/test_ring_stress.h            |   1 +
 devtools/libabigail.abignore           |   6 +
 doc/guides/rel_notes/release_25_03.rst |   2 +
 lib/ring/rte_ring.c                    |  54 ++++++-
 lib/ring/rte_ring.h                    |  12 ++
 lib/ring/rte_ring_core.h               |   9 ++
 lib/ring/rte_ring_elem.h               |  18 +++
 lib/ring/rte_ring_rts.h                | 216 ++++++++++++++++++++++++-
 lib/ring/rte_ring_rts_elem_pvt.h       | 168 +++++++++++++++++++
 13 files changed, 538 insertions(+), 10 deletions(-)
 create mode 100644 app/test/test_ring_rts_v2_stress.c

diff --git a/app/test/meson.build b/app/test/meson.build
index d5cb6a7f7a..e3d8cef3fa 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -166,6 +166,7 @@ source_file_deps = {
     'test_ring_mt_peek_stress_zc.c': ['ptr_compress'],
     'test_ring_perf.c': ['ptr_compress'],
     'test_ring_rts_stress.c': ['ptr_compress'],
+    'test_ring_rts_v2_stress.c': ['ptr_compress'],
     'test_ring_st_peek_stress.c': ['ptr_compress'],
     'test_ring_st_peek_stress_zc.c': ['ptr_compress'],
     'test_ring_stress.c': ['ptr_compress'],
diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index ba1fec1de3..094f14b859 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -284,6 +284,19 @@ static const struct {
 			.felem = rte_ring_dequeue_bulk_elem,
 		},
 	},
+	{
+		.desc = "MP_RTS/MC_RTS V2 sync mode",
+		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
+		.create_flags = RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ,
+		.enq = {
+			.flegacy = rte_ring_enqueue_bulk,
+			.felem = rte_ring_enqueue_bulk_elem,
+		},
+		.deq = {
+			.flegacy = rte_ring_dequeue_bulk,
+			.felem = rte_ring_dequeue_bulk_elem,
+		},
+	},
 	{
 		.desc = "MP_HTS/MC_HTS sync mode",
 		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
@@ -349,6 +362,19 @@ static const struct {
 			.felem = rte_ring_dequeue_burst_elem,
 		},
 	},
+	{
+		.desc = "MP_RTS/MC_RTS V2 sync mode",
+		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
+		.create_flags = RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ,
+		.enq = {
+			.flegacy = rte_ring_enqueue_burst,
+			.felem = rte_ring_enqueue_burst_elem,
+		},
+		.deq = {
+			.flegacy = rte_ring_dequeue_burst,
+			.felem = rte_ring_dequeue_burst_elem,
+		},
+	},
 	{
 		.desc = "MP_HTS/MC_HTS sync mode",
 		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
diff --git a/app/test/test_ring_rts_v2_stress.c b/app/test/test_ring_rts_v2_stress.c
new file mode 100644
index 0000000000..6079366a7d
--- /dev/null
+++ b/app/test/test_ring_rts_v2_stress.c
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include "test_ring_stress_impl.h"
+
+static inline uint32_t
+_st_ring_dequeue_bulk(struct rte_ring *r, void **obj, uint32_t n,
+	uint32_t *avail)
+{
+	return rte_ring_mc_rts_v2_dequeue_bulk(r, obj, n, avail);
+}
+
+static inline uint32_t
+_st_ring_enqueue_bulk(struct rte_ring *r, void * const *obj, uint32_t n,
+	uint32_t *free)
+{
+	return rte_ring_mp_rts_v2_enqueue_bulk(r, obj, n, free);
+}
+
+static int
+_st_ring_init(struct rte_ring *r, const char *name, uint32_t num)
+{
+	return rte_ring_init(r, name, num,
+		RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ);
+}
+
+const struct test test_ring_rts_v2_stress = {
+	.name = "MT_RTS_V2",
+	.nb_case = RTE_DIM(tests),
+	.cases = tests,
+};
diff --git a/app/test/test_ring_stress.c b/app/test/test_ring_stress.c
index 1af45e0fc8..94085acd5e 100644
--- a/app/test/test_ring_stress.c
+++ b/app/test/test_ring_stress.c
@@ -43,6 +43,9 @@ test_ring_stress(void)
 	n += test_ring_rts_stress.nb_case;
 	k += run_test(&test_ring_rts_stress);
 
+	n += test_ring_rts_v2_stress.nb_case;
+	k += run_test(&test_ring_rts_v2_stress);
+
 	n += test_ring_hts_stress.nb_case;
 	k += run_test(&test_ring_hts_stress);
 
diff --git a/app/test/test_ring_stress.h b/app/test/test_ring_stress.h
index 416d68c9a0..505957f6fb 100644
--- a/app/test/test_ring_stress.h
+++ b/app/test/test_ring_stress.h
@@ -34,6 +34,7 @@ struct test {
 
 extern const struct test test_ring_mpmc_stress;
 extern const struct test test_ring_rts_stress;
+extern const struct test test_ring_rts_v2_stress;
 extern const struct test test_ring_hts_stress;
 extern const struct test test_ring_mt_peek_stress;
 extern const struct test test_ring_mt_peek_stress_zc;
diff --git a/devtools/libabigail.abignore b/devtools/libabigail.abignore
index 21b8cd6113..d4dd99a99e 100644
--- a/devtools/libabigail.abignore
+++ b/devtools/libabigail.abignore
@@ -33,3 +33,9 @@
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Temporary exceptions till next major ABI version ;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+[suppress_type]
+       type_kind = struct
+       name = rte_ring_rts_cache
+[suppress_type]
+       name = rte_ring_rts_headtail
+       has_data_member_inserted_between = {offset_of(head), end}
diff --git a/doc/guides/rel_notes/release_25_03.rst b/doc/guides/rel_notes/release_25_03.rst
index 426dfcd982..f73bc9e397 100644
--- a/doc/guides/rel_notes/release_25_03.rst
+++ b/doc/guides/rel_notes/release_25_03.rst
@@ -102,6 +102,8 @@ ABI Changes
 
 * No ABI change that would break compatibility with 24.11.
 
+* ring: Added ``rte_ring_rts_cache`` structure and ``rts_cache`` field to the
+  ``rte_ring_rts_headtail`` structure.
 
 Known Issues
 ------------
diff --git a/lib/ring/rte_ring.c b/lib/ring/rte_ring.c
index aebb6d6728..ada1ae88fa 100644
--- a/lib/ring/rte_ring.c
+++ b/lib/ring/rte_ring.c
@@ -43,7 +43,8 @@ EAL_REGISTER_TAILQ(rte_ring_tailq)
 /* mask of all valid flag values to ring_create() */
 #define RING_F_MASK (RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ | \
 		     RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ |	       \
-		     RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ)
+		     RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ |	       \
+		     RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ)
 
 /* true if x is a power of 2 */
 #define POWEROF2(x) ((((x)-1) & (x)) == 0)
@@ -106,6 +107,7 @@ reset_headtail(void *p)
 		ht->tail = 0;
 		break;
 	case RTE_RING_SYNC_MT_RTS:
+	case RTE_RING_SYNC_MT_RTS_V2:
 		ht_rts->head.raw = 0;
 		ht_rts->tail.raw = 0;
 		break;
@@ -135,9 +137,11 @@ get_sync_type(uint32_t flags, enum rte_ring_sync_type *prod_st,
 	enum rte_ring_sync_type *cons_st)
 {
 	static const uint32_t prod_st_flags =
-		(RING_F_SP_ENQ | RING_F_MP_RTS_ENQ | RING_F_MP_HTS_ENQ);
+		(RING_F_SP_ENQ | RING_F_MP_RTS_ENQ | RING_F_MP_HTS_ENQ |
+		RING_F_MP_RTS_V2_ENQ);
 	static const uint32_t cons_st_flags =
-		(RING_F_SC_DEQ | RING_F_MC_RTS_DEQ | RING_F_MC_HTS_DEQ);
+		(RING_F_SC_DEQ | RING_F_MC_RTS_DEQ | RING_F_MC_HTS_DEQ |
+		RING_F_MC_RTS_V2_DEQ);
 
 	switch (flags & prod_st_flags) {
 	case 0:
@@ -152,6 +156,9 @@ get_sync_type(uint32_t flags, enum rte_ring_sync_type *prod_st,
 	case RING_F_MP_HTS_ENQ:
 		*prod_st = RTE_RING_SYNC_MT_HTS;
 		break;
+	case RING_F_MP_RTS_V2_ENQ:
+		*prod_st = RTE_RING_SYNC_MT_RTS_V2;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -169,6 +176,9 @@ get_sync_type(uint32_t flags, enum rte_ring_sync_type *prod_st,
 	case RING_F_MC_HTS_DEQ:
 		*cons_st = RTE_RING_SYNC_MT_HTS;
 		break;
+	case RING_F_MC_RTS_V2_DEQ:
+		*cons_st = RTE_RING_SYNC_MT_RTS_V2;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -239,6 +249,28 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
 	if (flags & RING_F_MC_RTS_DEQ)
 		rte_ring_set_cons_htd_max(r, r->capacity / HTD_MAX_DEF);
 
+	/* set default values for head-tail distance and allocate memory to cache */
+	if (flags & RING_F_MP_RTS_V2_ENQ) {
+		rte_ring_set_prod_htd_max(r, r->capacity / HTD_MAX_DEF);
+		r->rts_prod.rts_cache = (struct rte_ring_rts_cache *)rte_zmalloc(
+			"RTS_PROD_CACHE", sizeof(struct rte_ring_rts_cache) * r->size, 0);
+		if (r->rts_prod.rts_cache == NULL) {
+			RING_LOG(ERR, "Cannot reserve memory for rts prod cache");
+			return -ENOMEM;
+		}
+	}
+	if (flags & RING_F_MC_RTS_V2_DEQ) {
+		rte_ring_set_cons_htd_max(r, r->capacity / HTD_MAX_DEF);
+		r->rts_cons.rts_cache = (struct rte_ring_rts_cache *)rte_zmalloc(
+			"RTS_CONS_CACHE", sizeof(struct rte_ring_rts_cache) * r->size, 0);
+		if (r->rts_cons.rts_cache == NULL) {
+			if (flags & RING_F_MP_RTS_V2_ENQ)
+				rte_free(r->rts_prod.rts_cache);
+			RING_LOG(ERR, "Cannot reserve memory for rts cons cache");
+			return -ENOMEM;
+		}
+	}
+
 	return 0;
 }
 
@@ -293,9 +325,14 @@ rte_ring_create_elem(const char *name, unsigned int esize, unsigned int count,
 					 mz_flags, alignof(typeof(*r)));
 	if (mz != NULL) {
 		r = mz->addr;
-		/* no need to check return value here, we already checked the
-		 * arguments above */
-		rte_ring_init(r, name, requested_count, flags);
+
+		if (rte_ring_init(r, name, requested_count, flags)) {
+			rte_free(te);
+			if (rte_memzone_free(mz) != 0)
+				RING_LOG(ERR, "Cannot free memory for ring");
+			rte_mcfg_tailq_write_unlock();
+			return NULL;
+		}
 
 		te->data = (void *) r;
 		r->memzone = mz;
@@ -358,6 +395,11 @@ rte_ring_free(struct rte_ring *r)
 
 	rte_mcfg_tailq_write_unlock();
 
+	if (r->flags & RING_F_MP_RTS_V2_ENQ)
+		rte_free(r->rts_prod.rts_cache);
+	if (r->flags & RING_F_MC_RTS_V2_DEQ)
+		rte_free(r->rts_cons.rts_cache);
+
 	if (rte_memzone_free(r->memzone) != 0)
 		RING_LOG(ERR, "Cannot free memory");
 
diff --git a/lib/ring/rte_ring.h b/lib/ring/rte_ring.h
index 11ca69c73d..2b35ce038e 100644
--- a/lib/ring/rte_ring.h
+++ b/lib/ring/rte_ring.h
@@ -89,6 +89,9 @@ ssize_t rte_ring_get_memsize(unsigned int count);
  *      - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer RTS mode".
+ *      - RING_F_MP_RTS_V2_ENQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *        is "multi-producer RTS V2 mode".
  *      - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer HTS mode".
@@ -101,6 +104,9 @@ ssize_t rte_ring_get_memsize(unsigned int count);
  *      - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer RTS mode".
+ *      - RING_F_MC_RTS_V2_DEQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *        is "multi-consumer RTS V2 mode".
  *      - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer HTS mode".
@@ -149,6 +155,9 @@ int rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
  *      - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer RTS mode".
+ *      - RING_F_MP_RTS_V2_ENQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *        is "multi-producer RTS V2 mode".
  *      - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer HTS mode".
@@ -161,6 +170,9 @@ int rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
  *      - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer RTS mode".
+ *      - RING_F_MC_RTS_V2_DEQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *        is "multi-consumer RTS V2 mode".
  *      - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer HTS mode".
diff --git a/lib/ring/rte_ring_core.h b/lib/ring/rte_ring_core.h
index 6cd6ce9884..9e627d26c1 100644
--- a/lib/ring/rte_ring_core.h
+++ b/lib/ring/rte_ring_core.h
@@ -55,6 +55,7 @@ enum rte_ring_sync_type {
 	RTE_RING_SYNC_ST,     /**< single thread only */
 	RTE_RING_SYNC_MT_RTS, /**< multi-thread relaxed tail sync */
 	RTE_RING_SYNC_MT_HTS, /**< multi-thread head/tail sync */
+	RTE_RING_SYNC_MT_RTS_V2, /**< multi-thread relaxed tail sync v2 */
 };
 
 /**
@@ -82,11 +83,16 @@ union __rte_ring_rts_poscnt {
 	} val;
 };
 
+struct rte_ring_rts_cache {
+	volatile RTE_ATOMIC(uint32_t) num;      /**< Number of objs. */
+};
+
 struct rte_ring_rts_headtail {
 	volatile union __rte_ring_rts_poscnt tail;
 	enum rte_ring_sync_type sync_type;  /**< sync type of prod/cons */
 	uint32_t htd_max;   /**< max allowed distance between head/tail */
 	volatile union __rte_ring_rts_poscnt head;
+	struct rte_ring_rts_cache *rts_cache; /**< cache of deferred tail updates */
 };
 
 union __rte_ring_hts_pos {
@@ -163,4 +169,7 @@ struct rte_ring {
 #define RING_F_MP_HTS_ENQ 0x0020 /**< The default enqueue is "MP HTS". */
 #define RING_F_MC_HTS_DEQ 0x0040 /**< The default dequeue is "MC HTS". */
 
+#define RING_F_MP_RTS_V2_ENQ 0x0080 /**< The default enqueue is "MP RTS V2". */
+#define RING_F_MC_RTS_V2_DEQ 0x0100 /**< The default dequeue is "MC RTS V2". */
+
 #endif /* _RTE_RING_CORE_H_ */
diff --git a/lib/ring/rte_ring_elem.h b/lib/ring/rte_ring_elem.h
index b96bfc003f..1352709f94 100644
--- a/lib/ring/rte_ring_elem.h
+++ b/lib/ring/rte_ring_elem.h
@@ -71,6 +71,9 @@ ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
  *      - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer RTS mode".
+ *      - RING_F_MP_RTS_V2_ENQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *        is "multi-producer RTS V2 mode".
  *      - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer HTS mode".
@@ -83,6 +86,9 @@ ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
  *      - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer RTS mode".
+ *      - RING_F_MC_RTS_V2_DEQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *        is "multi-consumer RTS V2 mode".
  *      - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer HTS mode".
@@ -203,6 +209,9 @@ rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
 			free_space);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mp_rts_v2_enqueue_bulk_elem(r, obj_table, esize, n,
+			free_space);
 	}
 
 	/* valid ring should never reach this point */
@@ -385,6 +394,9 @@ rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
 			n, available);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mc_rts_v2_dequeue_bulk_elem(r, obj_table, esize,
+			n, available);
 	}
 
 	/* valid ring should never reach this point */
@@ -571,6 +583,9 @@ rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
 			n, free_space);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mp_rts_v2_enqueue_burst_elem(r, obj_table, esize,
+			n, free_space);
 	}
 
 	/* valid ring should never reach this point */
@@ -681,6 +696,9 @@ rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
 			n, available);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mc_rts_v2_dequeue_burst_elem(r, obj_table, esize,
+			n, available);
 	}
 
 	/* valid ring should never reach this point */
diff --git a/lib/ring/rte_ring_rts.h b/lib/ring/rte_ring_rts.h
index d7a3863c83..b47e400452 100644
--- a/lib/ring/rte_ring_rts.h
+++ b/lib/ring/rte_ring_rts.h
@@ -84,6 +84,33 @@ rte_ring_mp_rts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
 			RTE_RING_QUEUE_FIXED, free_space);
 }
 
+/**
+ * Enqueue several objects on the RTS V2 ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   The number of objects enqueued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *free_space)
+{
+	return __rte_ring_do_rts_v2_enqueue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_FIXED, free_space);
+}
+
 /**
  * Dequeue several objects from an RTS ring (multi-consumers safe).
  *
@@ -111,6 +138,33 @@ rte_ring_mc_rts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
 			RTE_RING_QUEUE_FIXED, available);
 }
 
+/**
+ * Dequeue several objects from an RTS V2 ring (multi-consumers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects that will be filled.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   The number of objects dequeued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *available)
+{
+	return __rte_ring_do_rts_v2_dequeue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_FIXED, available);
+}
+
 /**
  * Enqueue several objects on the RTS ring (multi-producers safe).
  *
@@ -138,6 +192,33 @@ rte_ring_mp_rts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
 			RTE_RING_QUEUE_VARIABLE, free_space);
 }
 
+/**
+ * Enqueue several objects on the RTS V2 ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   - n: Actual number of objects enqueued.
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *free_space)
+{
+	return __rte_ring_do_rts_v2_enqueue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_VARIABLE, free_space);
+}
+
 /**
  * Dequeue several objects from an RTS  ring (multi-consumers safe).
  * When the requested objects are more than the available objects,
@@ -167,6 +248,35 @@ rte_ring_mc_rts_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
 			RTE_RING_QUEUE_VARIABLE, available);
 }
 
+/**
+ * Dequeue several objects from an RTS V2 ring (multi-consumers safe).
+ * When the requested objects are more than the available objects,
+ * only dequeue the actual number of objects.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects that will be filled.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   - n: Actual number of objects dequeued, 0 if ring is empty
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *available)
+{
+	return __rte_ring_do_rts_v2_dequeue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_VARIABLE, available);
+}
+
 /**
  * Enqueue several objects on the RTS ring (multi-producers safe).
  *
@@ -213,6 +323,52 @@ rte_ring_mc_rts_dequeue_bulk(struct rte_ring *r, void **obj_table,
 			sizeof(uintptr_t), n, available);
 }
 
+/**
+ * Enqueue several objects on the RTS V2 ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   The number of objects enqueued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+			 unsigned int n, unsigned int *free_space)
+{
+	return rte_ring_mp_rts_v2_enqueue_bulk_elem(r, obj_table,
+			sizeof(uintptr_t), n, free_space);
+}
+
+/**
+ * Dequeue several objects from an RTS V2 ring (multi-consumers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   The number of objects dequeued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_bulk(struct rte_ring *r, void **obj_table,
+		unsigned int n, unsigned int *available)
+{
+	return rte_ring_mc_rts_v2_dequeue_bulk_elem(r, obj_table,
+			sizeof(uintptr_t), n, available);
+}
+
 /**
  * Enqueue several objects on the RTS ring (multi-producers safe).
  *
@@ -261,6 +417,54 @@ rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table,
 			sizeof(uintptr_t), n, available);
 }
 
+/**
+ * Enqueue several objects on the RTS V2 ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   - n: Actual number of objects enqueued.
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_burst(struct rte_ring *r, void * const *obj_table,
+			 unsigned int n, unsigned int *free_space)
+{
+	return rte_ring_mp_rts_v2_enqueue_burst_elem(r, obj_table,
+			sizeof(uintptr_t), n, free_space);
+}
+
+/**
+ * Dequeue several objects from an RTS V2 ring (multi-consumers safe).
+ * When the requested objects are more than the available objects,
+ * only dequeue the actual number of objects.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   - n: Actual number of objects dequeued, 0 if ring is empty
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_burst(struct rte_ring *r, void **obj_table,
+		unsigned int n, unsigned int *available)
+{
+	return rte_ring_mc_rts_v2_dequeue_burst_elem(r, obj_table,
+			sizeof(uintptr_t), n, available);
+}
+
 /**
  * Return producer max Head-Tail-Distance (HTD).
  *
@@ -273,7 +477,8 @@ rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table,
 static inline uint32_t
 rte_ring_get_prod_htd_max(const struct rte_ring *r)
 {
-	if (r->prod.sync_type == RTE_RING_SYNC_MT_RTS)
+	if ((r->prod.sync_type == RTE_RING_SYNC_MT_RTS) ||
+			(r->prod.sync_type == RTE_RING_SYNC_MT_RTS_V2))
 		return r->rts_prod.htd_max;
 	return UINT32_MAX;
 }
@@ -292,7 +497,8 @@ rte_ring_get_prod_htd_max(const struct rte_ring *r)
 static inline int
 rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v)
 {
-	if (r->prod.sync_type != RTE_RING_SYNC_MT_RTS)
+	if ((r->prod.sync_type != RTE_RING_SYNC_MT_RTS) &&
+			(r->prod.sync_type != RTE_RING_SYNC_MT_RTS_V2))
 		return -ENOTSUP;
 
 	r->rts_prod.htd_max = v;
@@ -311,7 +517,8 @@ rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v)
 static inline uint32_t
 rte_ring_get_cons_htd_max(const struct rte_ring *r)
 {
-	if (r->cons.sync_type == RTE_RING_SYNC_MT_RTS)
+	if ((r->cons.sync_type == RTE_RING_SYNC_MT_RTS) ||
+			(r->cons.sync_type == RTE_RING_SYNC_MT_RTS_V2))
 		return r->rts_cons.htd_max;
 	return UINT32_MAX;
 }
@@ -330,7 +537,8 @@ rte_ring_get_cons_htd_max(const struct rte_ring *r)
 static inline int
 rte_ring_set_cons_htd_max(struct rte_ring *r, uint32_t v)
 {
-	if (r->cons.sync_type != RTE_RING_SYNC_MT_RTS)
+	if ((r->cons.sync_type != RTE_RING_SYNC_MT_RTS) &&
+			(r->cons.sync_type != RTE_RING_SYNC_MT_RTS_V2))
 		return -ENOTSUP;
 
 	r->rts_cons.htd_max = v;
diff --git a/lib/ring/rte_ring_rts_elem_pvt.h b/lib/ring/rte_ring_rts_elem_pvt.h
index 122650346b..4ce22a93ed 100644
--- a/lib/ring/rte_ring_rts_elem_pvt.h
+++ b/lib/ring/rte_ring_rts_elem_pvt.h
@@ -46,6 +46,92 @@ __rte_ring_rts_update_tail(struct rte_ring_rts_headtail *ht)
 			rte_memory_order_release, rte_memory_order_acquire) == 0);
 }
 
+/**
+ * @file rte_ring_rts_elem_pvt.h
+ * It is not recommended to include this file directly,
+ * include <rte_ring.h> instead.
+ * Contains internal helper functions for Relaxed Tail Sync (RTS) ring mode.
+ * For more information please refer to <rte_ring_rts.h>.
+ */
+
+/**
+ * @internal This function updates the tail value for RTS V2 rings.
+ */
+static __rte_always_inline void
+__rte_ring_rts_v2_update_tail(struct rte_ring_rts_headtail *ht,
+	uint32_t old_tail, uint32_t num, uint32_t mask)
+{
+	union __rte_ring_rts_poscnt ot, nt;
+
+	ot.val.cnt = nt.val.cnt = 0;
+	ot.val.pos = old_tail;
+	nt.val.pos = old_tail + num;
+
+	/*
+	 * If the tail still equals this operation's old position, update it
+	 * to the new value and keep advancing it while the corresponding
+	 * cache slots hold non-zero counts; otherwise record this
+	 * operation's count in the cache for a later thread to apply.
+	 */
+
+	if (rte_atomic_compare_exchange_strong_explicit(&ht->tail.raw,
+				(uint64_t *)(uintptr_t)&ot.raw, nt.raw,
+				rte_memory_order_release, rte_memory_order_acquire) == 0) {
+		ot.val.pos = old_tail;
+
+		/*
+		 * Record the count of this enqueue/dequeue in the
+		 * corresponding cache slot.
+		 */
+		rte_atomic_store_explicit(&ht->rts_cache[ot.val.pos & mask].num,
+			num, rte_memory_order_release);
+
+		/*
+		 * Another enqueue/dequeue may be competing to update the
+		 * tail. The winner keeps trying to advance the tail; the
+		 * loser exits.
+		 */
+		if (rte_atomic_compare_exchange_strong_explicit(&ht->tail.raw,
+					(uint64_t *)(uintptr_t)&ot.raw, nt.raw,
+					rte_memory_order_release, rte_memory_order_acquire) == 0)
+			return;
+
+		/*
+		 * Reset the corresponding cache slot to 0 for the next use.
+		 */
+		rte_atomic_store_explicit(&ht->rts_cache[ot.val.pos & mask].num,
+			0, rte_memory_order_release);
+	}
+
+	/*
+	 * Keep advancing the tail while the corresponding cache slot holds a
+	 * non-zero count. Reaching this point means this thread is updating
+	 * the tail on behalf of other enqueues/dequeues that lost the race.
+	 */
+	while (1) {
+		num = rte_atomic_load_explicit(&ht->rts_cache[nt.val.pos & mask].num,
+			rte_memory_order_acquire);
+		if (num == 0)
+			break;
+
+		ot.val.pos = nt.val.pos;
+		nt.val.pos += num;
+
+		/*
+		 * Another enqueue/dequeue may be competing to update the
+		 * tail. The winner keeps trying to advance the tail; the
+		 * loser exits.
+		 */
+		if (rte_atomic_compare_exchange_strong_explicit(&ht->tail.raw,
+					(uint64_t *)(uintptr_t)&ot.raw, nt.raw,
+					rte_memory_order_release, rte_memory_order_acquire) == 0)
+			return;
+
+		rte_atomic_store_explicit(&ht->rts_cache[ot.val.pos & mask].num,
+			0, rte_memory_order_release);
+	}
+}
+
 /**
  * @internal This function waits till head/tail distance wouldn't
  * exceed pre-defined max value.
@@ -218,6 +304,47 @@ __rte_ring_do_rts_enqueue_elem(struct rte_ring *r, const void *obj_table,
 	return n;
 }
 
+/**
+ * @internal Enqueue several objects on the RTS ring.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param behavior
+ *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring
+ *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param free_space
+ *   returns the amount of space after the enqueue operation has finished
+ * @return
+ *   Actual number of objects enqueued.
+ *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_rts_v2_enqueue_elem(struct rte_ring *r, const void *obj_table,
+	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
+	uint32_t *free_space)
+{
+	uint32_t free, head;
+
+	n = __rte_ring_rts_move_prod_head(r, n, behavior, &head, &free);
+
+	if (n != 0) {
+		__rte_ring_enqueue_elems(r, head, obj_table, esize, n);
+		__rte_ring_rts_v2_update_tail(&r->rts_prod, head, n, r->mask);
+	}
+
+	if (free_space != NULL)
+		*free_space = free - n;
+	return n;
+}
+
 /**
  * @internal Dequeue several objects from the RTS ring.
  *
@@ -259,4 +386,45 @@ __rte_ring_do_rts_dequeue_elem(struct rte_ring *r, void *obj_table,
 	return n;
 }
 
+/**
+ * @internal Dequeue several objects from the RTS ring.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to pull from the ring.
+ * @param behavior
+ *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring
+ *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param available
+ *   returns the number of remaining ring entries after the dequeue has finished
+ * @return
+ *   - Actual number of objects dequeued.
+ *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_rts_v2_dequeue_elem(struct rte_ring *r, void *obj_table,
+	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
+	uint32_t *available)
+{
+	uint32_t entries, head;
+
+	n = __rte_ring_rts_move_cons_head(r, n, behavior, &head, &entries);
+
+	if (n != 0) {
+		__rte_ring_dequeue_elems(r, head, obj_table, esize, n);
+		__rte_ring_rts_v2_update_tail(&r->rts_cons, head, n, r->mask);
+	}
+
+	if (available != NULL)
+		*available = entries - n;
+	return n;
+}
+
 #endif /* _RTE_RING_RTS_ELEM_PVT_H_ */
-- 
2.27.0


^ permalink raw reply	[relevance 5%]

* [PATCH v2] ring: add the second version of the RTS interface
  2025-01-05  9:57  5% [PATCH] ring: add the second version of the RTS interface Huichao Cai
@ 2025-01-05 15:13  5% ` Huichao Cai
  2025-01-08  1:41  3%   ` Huichao Cai
  0 siblings, 1 reply; 169+ results
From: Huichao Cai @ 2025-01-05 15:13 UTC (permalink / raw)
  To: honnappa.nagarahalli, konstantin.v.ananyev, thomas; +Cc: dev

The timing of RTS enqueue/dequeue tail updates is limited to the
last enqueue/dequeue, which reduces concurrency. Add a V2 version
of the RTS interface in which the tail update is no longer tied to
the last enqueue/dequeue, so the tail can be updated promptly and
concurrency is increased.

Add some corresponding test cases.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
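Notes (not part of the commit message): a minimal usage sketch of the
new mode, assuming the RING_F_*_RTS_V2_* flags and helpers added by
this patch are applied; illustrative only, error handling elided.

    #include <rte_lcore.h>
    #include <rte_ring.h>

    static int
    rts_v2_demo(void)
    {
        void *objs[32] = { NULL };
        unsigned int n;
        struct rte_ring *r;

        /* Both producer and consumer use the new RTS V2 sync type. */
        r = rte_ring_create("rts_v2_demo", 1024, rte_socket_id(),
                RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ);
        if (r == NULL)
            return -1;

        /* The generic burst APIs dispatch to the RTS V2 paths. */
        n = rte_ring_enqueue_burst(r, objs, RTE_DIM(objs), NULL);
        n = rte_ring_dequeue_burst(r, objs, n, NULL);
        (void)n;

        rte_ring_free(r);
        return 0;
    }
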
 app/test/meson.build                   |   1 +
 app/test/test_ring.c                   |  26 +++
 app/test/test_ring_rts_v2_stress.c     |  32 ++++
 app/test/test_ring_stress.c            |   3 +
 app/test/test_ring_stress.h            |   1 +
 devtools/libabigail.abignore           |   6 +
 doc/guides/rel_notes/release_25_03.rst |   2 +
 lib/ring/rte_ring.c                    |  54 ++++++-
 lib/ring/rte_ring.h                    |  12 ++
 lib/ring/rte_ring_core.h               |   9 ++
 lib/ring/rte_ring_elem.h               |  18 +++
 lib/ring/rte_ring_rts.h                | 216 ++++++++++++++++++++++++-
 lib/ring/rte_ring_rts_elem_pvt.h       | 168 +++++++++++++++++++
 13 files changed, 538 insertions(+), 10 deletions(-)
 create mode 100644 app/test/test_ring_rts_v2_stress.c

diff --git a/app/test/meson.build b/app/test/meson.build
index d5cb6a7f7a..e3d8cef3fa 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -166,6 +166,7 @@ source_file_deps = {
     'test_ring_mt_peek_stress_zc.c': ['ptr_compress'],
     'test_ring_perf.c': ['ptr_compress'],
     'test_ring_rts_stress.c': ['ptr_compress'],
+    'test_ring_rts_v2_stress.c': ['ptr_compress'],
     'test_ring_st_peek_stress.c': ['ptr_compress'],
     'test_ring_st_peek_stress_zc.c': ['ptr_compress'],
     'test_ring_stress.c': ['ptr_compress'],
diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index ba1fec1de3..094f14b859 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -284,6 +284,19 @@ static const struct {
 			.felem = rte_ring_dequeue_bulk_elem,
 		},
 	},
+	{
+		.desc = "MP_RTS/MC_RTS V2 sync mode",
+		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
+		.create_flags = RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ,
+		.enq = {
+			.flegacy = rte_ring_enqueue_bulk,
+			.felem = rte_ring_enqueue_bulk_elem,
+		},
+		.deq = {
+			.flegacy = rte_ring_dequeue_bulk,
+			.felem = rte_ring_dequeue_bulk_elem,
+		},
+	},
 	{
 		.desc = "MP_HTS/MC_HTS sync mode",
 		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
@@ -349,6 +362,19 @@ static const struct {
 			.felem = rte_ring_dequeue_burst_elem,
 		},
 	},
+	{
+		.desc = "MP_RTS/MC_RTS V2 sync mode",
+		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
+		.create_flags = RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ,
+		.enq = {
+			.flegacy = rte_ring_enqueue_burst,
+			.felem = rte_ring_enqueue_burst_elem,
+		},
+		.deq = {
+			.flegacy = rte_ring_dequeue_burst,
+			.felem = rte_ring_dequeue_burst_elem,
+		},
+	},
 	{
 		.desc = "MP_HTS/MC_HTS sync mode",
 		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
diff --git a/app/test/test_ring_rts_v2_stress.c b/app/test/test_ring_rts_v2_stress.c
new file mode 100644
index 0000000000..6079366a7d
--- /dev/null
+++ b/app/test/test_ring_rts_v2_stress.c
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include "test_ring_stress_impl.h"
+
+static inline uint32_t
+_st_ring_dequeue_bulk(struct rte_ring *r, void **obj, uint32_t n,
+	uint32_t *avail)
+{
+	return rte_ring_mc_rts_v2_dequeue_bulk(r, obj, n, avail);
+}
+
+static inline uint32_t
+_st_ring_enqueue_bulk(struct rte_ring *r, void * const *obj, uint32_t n,
+	uint32_t *free)
+{
+	return rte_ring_mp_rts_v2_enqueue_bulk(r, obj, n, free);
+}
+
+static int
+_st_ring_init(struct rte_ring *r, const char *name, uint32_t num)
+{
+	return rte_ring_init(r, name, num,
+		RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ);
+}
+
+const struct test test_ring_rts_v2_stress = {
+	.name = "MT_RTS_V2",
+	.nb_case = RTE_DIM(tests),
+	.cases = tests,
+};
diff --git a/app/test/test_ring_stress.c b/app/test/test_ring_stress.c
index 1af45e0fc8..94085acd5e 100644
--- a/app/test/test_ring_stress.c
+++ b/app/test/test_ring_stress.c
@@ -43,6 +43,9 @@ test_ring_stress(void)
 	n += test_ring_rts_stress.nb_case;
 	k += run_test(&test_ring_rts_stress);
 
+	n += test_ring_rts_v2_stress.nb_case;
+	k += run_test(&test_ring_rts_v2_stress);
+
 	n += test_ring_hts_stress.nb_case;
 	k += run_test(&test_ring_hts_stress);
 
diff --git a/app/test/test_ring_stress.h b/app/test/test_ring_stress.h
index 416d68c9a0..505957f6fb 100644
--- a/app/test/test_ring_stress.h
+++ b/app/test/test_ring_stress.h
@@ -34,6 +34,7 @@ struct test {
 
 extern const struct test test_ring_mpmc_stress;
 extern const struct test test_ring_rts_stress;
+extern const struct test test_ring_rts_v2_stress;
 extern const struct test test_ring_hts_stress;
 extern const struct test test_ring_mt_peek_stress;
 extern const struct test test_ring_mt_peek_stress_zc;
diff --git a/devtools/libabigail.abignore b/devtools/libabigail.abignore
index 21b8cd6113..d4dd99a99e 100644
--- a/devtools/libabigail.abignore
+++ b/devtools/libabigail.abignore
@@ -33,3 +33,9 @@
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Temporary exceptions till next major ABI version ;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+[suppress_type]
+       type_kind = struct
+       name = rte_ring_rts_cache
+[suppress_type]
+       name = rte_ring_rts_headtail
+       has_data_member_inserted_between = {offset_of(head), end}
diff --git a/doc/guides/rel_notes/release_25_03.rst b/doc/guides/rel_notes/release_25_03.rst
index 426dfcd982..f73bc9e397 100644
--- a/doc/guides/rel_notes/release_25_03.rst
+++ b/doc/guides/rel_notes/release_25_03.rst
@@ -102,6 +102,8 @@ ABI Changes
 
 * No ABI change that would break compatibility with 24.11.
 
+* ring: Added ``rte_ring_rts_cache`` structure and ``rts_cache`` field to the
+  ``rte_ring_rts_headtail`` structure.
 
 Known Issues
 ------------
diff --git a/lib/ring/rte_ring.c b/lib/ring/rte_ring.c
index aebb6d6728..ada1ae88fa 100644
--- a/lib/ring/rte_ring.c
+++ b/lib/ring/rte_ring.c
@@ -43,7 +43,8 @@ EAL_REGISTER_TAILQ(rte_ring_tailq)
 /* mask of all valid flag values to ring_create() */
 #define RING_F_MASK (RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ | \
 		     RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ |	       \
-		     RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ)
+		     RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ |	       \
+		     RING_F_MP_RTS_V2_ENQ | RING_F_MC_RTS_V2_DEQ)
 
 /* true if x is a power of 2 */
 #define POWEROF2(x) ((((x)-1) & (x)) == 0)
@@ -106,6 +107,7 @@ reset_headtail(void *p)
 		ht->tail = 0;
 		break;
 	case RTE_RING_SYNC_MT_RTS:
+	case RTE_RING_SYNC_MT_RTS_V2:
 		ht_rts->head.raw = 0;
 		ht_rts->tail.raw = 0;
 		break;
@@ -135,9 +137,11 @@ get_sync_type(uint32_t flags, enum rte_ring_sync_type *prod_st,
 	enum rte_ring_sync_type *cons_st)
 {
 	static const uint32_t prod_st_flags =
-		(RING_F_SP_ENQ | RING_F_MP_RTS_ENQ | RING_F_MP_HTS_ENQ);
+		(RING_F_SP_ENQ | RING_F_MP_RTS_ENQ | RING_F_MP_HTS_ENQ |
+		RING_F_MP_RTS_V2_ENQ);
 	static const uint32_t cons_st_flags =
-		(RING_F_SC_DEQ | RING_F_MC_RTS_DEQ | RING_F_MC_HTS_DEQ);
+		(RING_F_SC_DEQ | RING_F_MC_RTS_DEQ | RING_F_MC_HTS_DEQ |
+		RING_F_MC_RTS_V2_DEQ);
 
 	switch (flags & prod_st_flags) {
 	case 0:
@@ -152,6 +156,9 @@ get_sync_type(uint32_t flags, enum rte_ring_sync_type *prod_st,
 	case RING_F_MP_HTS_ENQ:
 		*prod_st = RTE_RING_SYNC_MT_HTS;
 		break;
+	case RING_F_MP_RTS_V2_ENQ:
+		*prod_st = RTE_RING_SYNC_MT_RTS_V2;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -169,6 +176,9 @@ get_sync_type(uint32_t flags, enum rte_ring_sync_type *prod_st,
 	case RING_F_MC_HTS_DEQ:
 		*cons_st = RTE_RING_SYNC_MT_HTS;
 		break;
+	case RING_F_MC_RTS_V2_DEQ:
+		*cons_st = RTE_RING_SYNC_MT_RTS_V2;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -239,6 +249,28 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
 	if (flags & RING_F_MC_RTS_DEQ)
 		rte_ring_set_cons_htd_max(r, r->capacity / HTD_MAX_DEF);
 
+	/* set default values for head-tail distance and allocate memory to cache */
+	if (flags & RING_F_MP_RTS_V2_ENQ) {
+		rte_ring_set_prod_htd_max(r, r->capacity / HTD_MAX_DEF);
+		r->rts_prod.rts_cache = (struct rte_ring_rts_cache *)rte_zmalloc(
+			"RTS_PROD_CACHE", sizeof(struct rte_ring_rts_cache) * r->size, 0);
+		if (r->rts_prod.rts_cache == NULL) {
+			RING_LOG(ERR, "Cannot reserve memory for rts prod cache");
+			return -ENOMEM;
+		}
+	}
+	if (flags & RING_F_MC_RTS_V2_DEQ) {
+		rte_ring_set_cons_htd_max(r, r->capacity / HTD_MAX_DEF);
+		r->rts_cons.rts_cache = (struct rte_ring_rts_cache *)rte_zmalloc(
+			"RTS_CONS_CACHE", sizeof(struct rte_ring_rts_cache) * r->size, 0);
+		if (r->rts_cons.rts_cache == NULL) {
+			if (flags & RING_F_MP_RTS_V2_ENQ)
+				rte_free(r->rts_prod.rts_cache);
+			RING_LOG(ERR, "Cannot reserve memory for rts cons cache");
+			return -ENOMEM;
+		}
+	}
+
 	return 0;
 }
 
@@ -293,9 +325,14 @@ rte_ring_create_elem(const char *name, unsigned int esize, unsigned int count,
 					 mz_flags, alignof(typeof(*r)));
 	if (mz != NULL) {
 		r = mz->addr;
-		/* no need to check return value here, we already checked the
-		 * arguments above */
-		rte_ring_init(r, name, requested_count, flags);
+
+		if (rte_ring_init(r, name, requested_count, flags)) {
+			rte_free(te);
+			if (rte_memzone_free(mz) != 0)
+				RING_LOG(ERR, "Cannot free memory for ring");
+			rte_mcfg_tailq_write_unlock();
+			return NULL;
+		}
 
 		te->data = (void *) r;
 		r->memzone = mz;
@@ -358,6 +395,11 @@ rte_ring_free(struct rte_ring *r)
 
 	rte_mcfg_tailq_write_unlock();
 
+	if (r->flags & RING_F_MP_RTS_V2_ENQ)
+		rte_free(r->rts_prod.rts_cache);
+	if (r->flags & RING_F_MC_RTS_V2_DEQ)
+		rte_free(r->rts_cons.rts_cache);
+
 	if (rte_memzone_free(r->memzone) != 0)
 		RING_LOG(ERR, "Cannot free memory");
 
diff --git a/lib/ring/rte_ring.h b/lib/ring/rte_ring.h
index 11ca69c73d..2b35ce038e 100644
--- a/lib/ring/rte_ring.h
+++ b/lib/ring/rte_ring.h
@@ -89,6 +89,9 @@ ssize_t rte_ring_get_memsize(unsigned int count);
  *      - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer RTS mode".
+ *      - RING_F_MP_RTS_V2_ENQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *        is "multi-producer RTS V2 mode".
  *      - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer HTS mode".
@@ -101,6 +104,9 @@ ssize_t rte_ring_get_memsize(unsigned int count);
  *      - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer RTS mode".
+ *      - RING_F_MC_RTS_V2_DEQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *        is "multi-consumer RTS V2 mode".
  *      - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer HTS mode".
@@ -149,6 +155,9 @@ int rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
  *      - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer RTS mode".
+ *      - RING_F_MP_RTS_V2_ENQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *        is "multi-producer RTS V2 mode".
  *      - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer HTS mode".
@@ -161,6 +170,9 @@ int rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
  *      - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer RTS mode".
+ *      - RING_F_MC_RTS_V2_DEQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *        is "multi-consumer RTS V2 mode".
  *      - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer HTS mode".
diff --git a/lib/ring/rte_ring_core.h b/lib/ring/rte_ring_core.h
index 6cd6ce9884..9e627d26c1 100644
--- a/lib/ring/rte_ring_core.h
+++ b/lib/ring/rte_ring_core.h
@@ -55,6 +55,7 @@ enum rte_ring_sync_type {
 	RTE_RING_SYNC_ST,     /**< single thread only */
 	RTE_RING_SYNC_MT_RTS, /**< multi-thread relaxed tail sync */
 	RTE_RING_SYNC_MT_HTS, /**< multi-thread head/tail sync */
+	RTE_RING_SYNC_MT_RTS_V2, /**< multi-thread relaxed tail sync v2 */
 };
 
 /**
@@ -82,11 +83,16 @@ union __rte_ring_rts_poscnt {
 	} val;
 };
 
+struct rte_ring_rts_cache {
+	volatile RTE_ATOMIC(uint32_t) num;      /**< Number of objs. */
+};
+
 struct rte_ring_rts_headtail {
 	volatile union __rte_ring_rts_poscnt tail;
 	enum rte_ring_sync_type sync_type;  /**< sync type of prod/cons */
 	uint32_t htd_max;   /**< max allowed distance between head/tail */
 	volatile union __rte_ring_rts_poscnt head;
+	struct rte_ring_rts_cache *rts_cache; /**< Cache of prod/cons */
 };
 
 union __rte_ring_hts_pos {
@@ -163,4 +169,7 @@ struct rte_ring {
 #define RING_F_MP_HTS_ENQ 0x0020 /**< The default enqueue is "MP HTS". */
 #define RING_F_MC_HTS_DEQ 0x0040 /**< The default dequeue is "MC HTS". */
 
+#define RING_F_MP_RTS_V2_ENQ 0x0080 /**< The default enqueue is "MP RTS V2". */
+#define RING_F_MC_RTS_V2_DEQ 0x0100 /**< The default dequeue is "MC RTS V2". */
+
 #endif /* _RTE_RING_CORE_H_ */
diff --git a/lib/ring/rte_ring_elem.h b/lib/ring/rte_ring_elem.h
index b96bfc003f..1352709f94 100644
--- a/lib/ring/rte_ring_elem.h
+++ b/lib/ring/rte_ring_elem.h
@@ -71,6 +71,9 @@ ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
  *      - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer RTS mode".
+ *      - RING_F_MP_RTS_V2_ENQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *        is "multi-producer RTS V2 mode".
  *      - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
  *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
  *        is "multi-producer HTS mode".
@@ -83,6 +86,9 @@ ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
  *      - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer RTS mode".
+ *      - RING_F_MC_RTS_V2_DEQ: If this flag is set, the default behavior when
+ *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *        is "multi-consumer RTS V2 mode".
  *      - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
  *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
  *        is "multi-consumer HTS mode".
@@ -203,6 +209,9 @@ rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
 			free_space);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mp_rts_v2_enqueue_bulk_elem(r, obj_table, esize, n,
+			free_space);
 	}
 
 	/* valid ring should never reach this point */
@@ -385,6 +394,9 @@ rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
 			n, available);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mc_rts_v2_dequeue_bulk_elem(r, obj_table, esize,
+			n, available);
 	}
 
 	/* valid ring should never reach this point */
@@ -571,6 +583,9 @@ rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
 			n, free_space);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mp_rts_v2_enqueue_burst_elem(r, obj_table, esize,
+			n, free_space);
 	}
 
 	/* valid ring should never reach this point */
@@ -681,6 +696,9 @@ rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
 	case RTE_RING_SYNC_MT_HTS:
 		return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
 			n, available);
+	case RTE_RING_SYNC_MT_RTS_V2:
+		return rte_ring_mc_rts_v2_dequeue_burst_elem(r, obj_table, esize,
+			n, available);
 	}
 
 	/* valid ring should never reach this point */
diff --git a/lib/ring/rte_ring_rts.h b/lib/ring/rte_ring_rts.h
index d7a3863c83..b47e400452 100644
--- a/lib/ring/rte_ring_rts.h
+++ b/lib/ring/rte_ring_rts.h
@@ -84,6 +84,33 @@ rte_ring_mp_rts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
 			RTE_RING_QUEUE_FIXED, free_space);
 }
 
+/**
+ * Enqueue several objects on the RTS ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   The number of objects enqueued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *free_space)
+{
+	return __rte_ring_do_rts_v2_enqueue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_FIXED, free_space);
+}
+
 /**
  * Dequeue several objects from an RTS ring (multi-consumers safe).
  *
@@ -111,6 +138,33 @@ rte_ring_mc_rts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
 			RTE_RING_QUEUE_FIXED, available);
 }
 
+/**
+ * Dequeue several objects from an RTS ring (multi-consumers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects that will be filled.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   The number of objects dequeued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *available)
+{
+	return __rte_ring_do_rts_v2_dequeue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_FIXED, available);
+}
+
 /**
  * Enqueue several objects on the RTS ring (multi-producers safe).
  *
@@ -138,6 +192,33 @@ rte_ring_mp_rts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
 			RTE_RING_QUEUE_VARIABLE, free_space);
 }
 
+/**
+ * Enqueue several objects on the RTS ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   - n: Actual number of objects enqueued.
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *free_space)
+{
+	return __rte_ring_do_rts_v2_enqueue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_VARIABLE, free_space);
+}
+
 /**
  * Dequeue several objects from an RTS  ring (multi-consumers safe).
  * When the requested objects are more than the available objects,
@@ -167,6 +248,35 @@ rte_ring_mc_rts_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
 			RTE_RING_QUEUE_VARIABLE, available);
 }
 
+/**
+ * Dequeue several objects from an RTS ring (multi-consumers safe).
+ * When the requested objects are more than the available objects,
+ * only dequeue the actual number of objects.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects that will be filled.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   - n: Actual number of objects dequeued, 0 if ring is empty
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *available)
+{
+	return __rte_ring_do_rts_v2_dequeue_elem(r, obj_table, esize, n,
+			RTE_RING_QUEUE_VARIABLE, available);
+}
+
 /**
  * Enqueue several objects on the RTS ring (multi-producers safe).
  *
@@ -213,6 +323,52 @@ rte_ring_mc_rts_dequeue_bulk(struct rte_ring *r, void **obj_table,
 			sizeof(uintptr_t), n, available);
 }
 
+/**
+ * Enqueue several objects on the RTS V2 ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   The number of objects enqueued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+			 unsigned int n, unsigned int *free_space)
+{
+	return rte_ring_mp_rts_v2_enqueue_bulk_elem(r, obj_table,
+			sizeof(uintptr_t), n, free_space);
+}
+
+/**
+ * Dequeue several objects from an RTS V2 ring (multi-consumers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   The number of objects dequeued, either 0 or n
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_bulk(struct rte_ring *r, void **obj_table,
+		unsigned int n, unsigned int *available)
+{
+	return rte_ring_mc_rts_v2_dequeue_bulk_elem(r, obj_table,
+			sizeof(uintptr_t), n, available);
+}
+
 /**
  * Enqueue several objects on the RTS ring (multi-producers safe).
  *
@@ -261,6 +417,54 @@ rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table,
 			sizeof(uintptr_t), n, available);
 }
 
+/**
+ * Enqueue several objects on the RTS V2 ring (multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ *   if non-NULL, returns the amount of space in the ring after the
+ *   enqueue operation has finished.
+ * @return
+ *   - n: Actual number of objects enqueued.
+ */
+static __rte_always_inline unsigned int
+rte_ring_mp_rts_v2_enqueue_burst(struct rte_ring *r, void * const *obj_table,
+			 unsigned int n, unsigned int *free_space)
+{
+	return rte_ring_mp_rts_v2_enqueue_burst_elem(r, obj_table,
+			sizeof(uintptr_t), n, free_space);
+}
+
+/**
+ * Dequeue several objects from an RTS V2 ring (multi-consumers safe).
+ * When the requested objects are more than the available objects,
+ * only dequeue the actual number of objects.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ *   If non-NULL, returns the number of remaining ring entries after the
+ *   dequeue has finished.
+ * @return
+ *   - n: Actual number of objects dequeued, 0 if ring is empty
+ */
+static __rte_always_inline unsigned int
+rte_ring_mc_rts_v2_dequeue_burst(struct rte_ring *r, void **obj_table,
+		unsigned int n, unsigned int *available)
+{
+	return rte_ring_mc_rts_v2_dequeue_burst_elem(r, obj_table,
+			sizeof(uintptr_t), n, available);
+}
+
 /**
  * Return producer max Head-Tail-Distance (HTD).
  *
@@ -273,7 +477,8 @@ rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table,
 static inline uint32_t
 rte_ring_get_prod_htd_max(const struct rte_ring *r)
 {
-	if (r->prod.sync_type == RTE_RING_SYNC_MT_RTS)
+	if ((r->prod.sync_type == RTE_RING_SYNC_MT_RTS) ||
+			(r->prod.sync_type == RTE_RING_SYNC_MT_RTS_V2))
 		return r->rts_prod.htd_max;
 	return UINT32_MAX;
 }
@@ -292,7 +497,8 @@ rte_ring_get_prod_htd_max(const struct rte_ring *r)
 static inline int
 rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v)
 {
-	if (r->prod.sync_type != RTE_RING_SYNC_MT_RTS)
+	if ((r->prod.sync_type != RTE_RING_SYNC_MT_RTS) &&
+			(r->prod.sync_type != RTE_RING_SYNC_MT_RTS_V2))
 		return -ENOTSUP;
 
 	r->rts_prod.htd_max = v;
@@ -311,7 +517,8 @@ rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v)
 static inline uint32_t
 rte_ring_get_cons_htd_max(const struct rte_ring *r)
 {
-	if (r->cons.sync_type == RTE_RING_SYNC_MT_RTS)
+	if ((r->cons.sync_type == RTE_RING_SYNC_MT_RTS) ||
+			(r->cons.sync_type == RTE_RING_SYNC_MT_RTS_V2))
 		return r->rts_cons.htd_max;
 	return UINT32_MAX;
 }
@@ -330,7 +537,8 @@ rte_ring_get_cons_htd_max(const struct rte_ring *r)
 static inline int
 rte_ring_set_cons_htd_max(struct rte_ring *r, uint32_t v)
 {
-	if (r->cons.sync_type != RTE_RING_SYNC_MT_RTS)
+	if ((r->cons.sync_type != RTE_RING_SYNC_MT_RTS) &&
+			(r->cons.sync_type != RTE_RING_SYNC_MT_RTS_V2))
 		return -ENOTSUP;
 
 	r->rts_cons.htd_max = v;
diff --git a/lib/ring/rte_ring_rts_elem_pvt.h b/lib/ring/rte_ring_rts_elem_pvt.h
index 122650346b..4ce22a93ed 100644
--- a/lib/ring/rte_ring_rts_elem_pvt.h
+++ b/lib/ring/rte_ring_rts_elem_pvt.h
@@ -46,6 +46,92 @@ __rte_ring_rts_update_tail(struct rte_ring_rts_headtail *ht)
 			rte_memory_order_release, rte_memory_order_acquire) == 0);
 }
 
+/**
+ * @file rte_ring_rts_elem_pvt.h
+ * It is not recommended to include this file directly,
+ * include <rte_ring.h> instead.
+ * Contains internal helper functions for Relaxed Tail Sync (RTS) ring mode.
+ * For more information please refer to <rte_ring_rts.h>.
+ */
+
+/**
+ * @internal This function updates tail values.
+ */
+static __rte_always_inline void
+__rte_ring_rts_v2_update_tail(struct rte_ring_rts_headtail *ht,
+	uint32_t old_tail, uint32_t num, uint32_t mask)
+{
+	union __rte_ring_rts_poscnt ot, nt;
+
+	ot.val.cnt = nt.val.cnt = 0;
+	ot.val.pos = old_tail;
+	nt.val.pos = old_tail + num;
+
+	/*
+	 * If the tail equals this enqueue/dequeue's old position, advance
+	 * it by num and keep advancing it while the cache entry for the
+	 * next position is non-zero; otherwise record num in the cache
+	 * entry for this position so the tail holder advances it for us.
+	 */
+
+	if (rte_atomic_compare_exchange_strong_explicit(&ht->tail.raw,
+				(uint64_t *)(uintptr_t)&ot.raw, nt.raw,
+				rte_memory_order_release, rte_memory_order_acquire) == 0) {
+		ot.val.pos = old_tail;
+
+		/*
+		 * Record the count of the current enqueue/dequeue in the
+		 * corresponding cache entry.
+		 */
+		rte_atomic_store_explicit(&ht->rts_cache[ot.val.pos & mask].num,
+			num, rte_memory_order_release);
+
+		/*
+		 * Another enqueue/dequeue may race with us to update the
+		 * tail. The winner keeps trying to advance the tail and
+		 * the loser exits.
+		 */
+		if (rte_atomic_compare_exchange_strong_explicit(&ht->tail.raw,
+					(uint64_t *)(uintptr_t)&ot.raw, nt.raw,
+					rte_memory_order_release, rte_memory_order_acquire) == 0)
+			return;
+
+		/*
+		 * Reset the corresponding cache entry to 0 for its next use.
+		 */
+		rte_atomic_store_explicit(&ht->rts_cache[ot.val.pos & mask].num,
+			0, rte_memory_order_release);
+	}
+
+	/*
+	 * Keep advancing the tail while the cache entry for the next
+	 * position is non-zero. Reaching here means the current thread is
+	 * completing tail updates on behalf of other enqueues/dequeues.
+	 */
+	while (1) {
+		num = rte_atomic_load_explicit(&ht->rts_cache[nt.val.pos & mask].num,
+			rte_memory_order_acquire);
+		if (num == 0)
+			break;
+
+		ot.val.pos = nt.val.pos;
+		nt.val.pos += num;
+
+		/*
+		 * Another enqueue/dequeue may race with us to update the
+		 * tail. The winner keeps trying to advance the tail and
+		 * the loser exits.
+		 */
+		if (rte_atomic_compare_exchange_strong_explicit(&ht->tail.raw,
+					(uint64_t *)(uintptr_t)&ot.raw, nt.raw,
+					rte_memory_order_release, rte_memory_order_acquire) == 0)
+			return;
+
+		rte_atomic_store_explicit(&ht->rts_cache[ot.val.pos & mask].num,
+			0, rte_memory_order_release);
+	}
+}
+
 /**
  * @internal This function waits till head/tail distance wouldn't
  * exceed pre-defined max value.
@@ -218,6 +304,47 @@ __rte_ring_do_rts_enqueue_elem(struct rte_ring *r, const void *obj_table,
 	return n;
 }
 
+/**
+ * @internal Enqueue several objects on the RTS ring.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @param behavior
+ *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring
+ *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param free_space
+ *   returns the amount of space after the enqueue operation has finished
+ * @return
+ *   Actual number of objects enqueued.
+ *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_rts_v2_enqueue_elem(struct rte_ring *r, const void *obj_table,
+	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
+	uint32_t *free_space)
+{
+	uint32_t free, head;
+
+	n = __rte_ring_rts_move_prod_head(r, n, behavior, &head, &free);
+
+	if (n != 0) {
+		__rte_ring_enqueue_elems(r, head, obj_table, esize, n);
+		__rte_ring_rts_v2_update_tail(&r->rts_prod, head, n, r->mask);
+	}
+
+	if (free_space != NULL)
+		*free_space = free - n;
+	return n;
+}
+
 /**
  * @internal Dequeue several objects from the RTS ring.
  *
@@ -259,4 +386,45 @@ __rte_ring_do_rts_dequeue_elem(struct rte_ring *r, void *obj_table,
 	return n;
 }
 
+/**
+ * @internal Dequeue several objects from the RTS ring.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of objects.
+ * @param esize
+ *   The size of ring element, in bytes. It must be a multiple of 4.
+ *   This must be the same value used while creating the ring. Otherwise
+ *   the results are undefined.
+ * @param n
+ *   The number of objects to pull from the ring.
+ * @param behavior
+ *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring
+ *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param available
+ *   returns the number of remaining ring entries after the dequeue has finished
+ * @return
+ *   - Actual number of objects dequeued.
+ *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_rts_v2_dequeue_elem(struct rte_ring *r, void *obj_table,
+	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
+	uint32_t *available)
+{
+	uint32_t entries, head;
+
+	n = __rte_ring_rts_move_cons_head(r, n, behavior, &head, &entries);
+
+	if (n != 0) {
+		__rte_ring_dequeue_elems(r, head, obj_table, esize, n);
+		__rte_ring_rts_v2_update_tail(&r->rts_cons, head, n, r->mask);
+	}
+
+	if (available != NULL)
+		*available = entries - n;
+	return n;
+}
+
 #endif /* _RTE_RING_RTS_ELEM_PVT_H_ */
-- 
2.27.0


^ permalink raw reply	[relevance 5%]

* RE: [PATCH v16 1/4] lib: add generic support for reading PMU events
  2024-12-06 18:15  3%       ` Konstantin Ananyev
@ 2025-01-07  7:45  0%         ` Tomasz Duszynski
  0 siblings, 0 replies; 169+ results
From: Tomasz Duszynski @ 2025-01-07  7:45 UTC (permalink / raw)
  To: Konstantin Ananyev, Thomas Monjalon
  Cc: Ruifeng.Wang, bruce.richardson, david.marchand, dev, Jerin Jacob,
	konstantin.v.ananyev, mattias.ronnblom, mb, roretzla, stephen,
	zhoumin

>> Add support for programming PMU counters and reading their values in
>> runtime bypassing kernel completely.
>>
>> This is especially useful in cases where CPU cores are isolated i.e
>> run dedicated tasks. In such cases one cannot use standard perf
>> utility without sacrificing latency and performance.
>>
>> Signed-off-by: Tomasz Duszynski <tduszynski@marvell.com>
>> ---
>
>Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
>
>As future possible enhancements - I think it would be useful to make control-
>path API MT safe, plus probably try to hide some of the exposed internal
>structures (rte_pmu_event_group, etc.) inside .c (to minimize surface for
>possible ABI breakage).
>

Thanks. Yes, sure. This series is not a one-time addition; it will be improved over time.

>> --
>> 2.34.1


^ permalink raw reply	[relevance 0%]

* [v2 3/4] crypto/virtio: add vhost backend to virtio_user
  @ 2025-01-07 18:44  1% ` Gowrishankar Muthukrishnan
  0 siblings, 0 replies; 169+ results
From: Gowrishankar Muthukrishnan @ 2025-01-07 18:44 UTC (permalink / raw)
  To: dev, Akhil Goyal, Maxime Coquelin, Chenbo Xia, Fan Zhang, Jay Zhou
  Cc: jerinj, anoobj, David Marchand, Gowrishankar Muthukrishnan

Add vhost backend to virtio_user crypto.

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
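Notes (not part of the commit message): with this backend the crypto
device is instantiated as a vdev on top of a vhost-vdpa character
device. A minimal, hypothetical probe sketch; the "path" devarg name
is an assumption here, and the authoritative devargs are whatever
virtio_user_cryptodev.c below registers:

    #include <rte_bus_vdev.h>

    static int
    probe_virtio_user_crypto(void)
    {
        /* Assumed devarg name "path"; see virtio_user_cryptodev.c
         * in this patch for the registered devargs.
         */
        return rte_vdev_init("crypto_virtio_user0",
                "path=/dev/vhost-vdpa-0");
    }
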
 drivers/crypto/virtio/meson.build             |   7 +
 drivers/crypto/virtio/virtio_cryptodev.c      |  57 +-
 drivers/crypto/virtio/virtio_cryptodev.h      |   3 +
 drivers/crypto/virtio/virtio_pci.h            |   7 +
 drivers/crypto/virtio/virtio_ring.h           |   6 -
 .../crypto/virtio/virtio_user/vhost_vdpa.c    | 312 +++++++
 .../virtio/virtio_user/virtio_user_dev.c      | 776 ++++++++++++++++++
 .../virtio/virtio_user/virtio_user_dev.h      |  88 ++
 drivers/crypto/virtio/virtio_user_cryptodev.c | 587 +++++++++++++
 9 files changed, 1815 insertions(+), 28 deletions(-)
 create mode 100644 drivers/crypto/virtio/virtio_user/vhost_vdpa.c
 create mode 100644 drivers/crypto/virtio/virtio_user/virtio_user_dev.c
 create mode 100644 drivers/crypto/virtio/virtio_user/virtio_user_dev.h
 create mode 100644 drivers/crypto/virtio/virtio_user_cryptodev.c

diff --git a/drivers/crypto/virtio/meson.build b/drivers/crypto/virtio/meson.build
index 8181c8296f..e5bce54cca 100644
--- a/drivers/crypto/virtio/meson.build
+++ b/drivers/crypto/virtio/meson.build
@@ -16,3 +16,10 @@ sources = files(
         'virtio_rxtx.c',
         'virtqueue.c',
 )
+
+if is_linux
+    sources += files('virtio_user_cryptodev.c',
+        'virtio_user/vhost_vdpa.c',
+        'virtio_user/virtio_user_dev.c')
+    deps += ['bus_vdev', 'common_virtio']
+endif
diff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c
index d3db4f898e..c9f20cb338 100644
--- a/drivers/crypto/virtio/virtio_cryptodev.c
+++ b/drivers/crypto/virtio/virtio_cryptodev.c
@@ -544,24 +544,12 @@ virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
 	return 0;
 }
 
-/*
- * This function is based on probe() function
- * It returns 0 on success.
- */
-static int
-crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
-		struct rte_cryptodev_pmd_init_params *init_params)
+int
+crypto_virtio_dev_init(struct rte_cryptodev *cryptodev, uint64_t features,
+		struct rte_pci_device *pci_dev)
 {
-	struct rte_cryptodev *cryptodev;
 	struct virtio_crypto_hw *hw;
 
-	PMD_INIT_FUNC_TRACE();
-
-	cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
-					init_params);
-	if (cryptodev == NULL)
-		return -ENODEV;
-
 	cryptodev->driver_id = cryptodev_virtio_driver_id;
 	cryptodev->dev_ops = &virtio_crypto_dev_ops;
 
@@ -578,16 +566,41 @@ crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
 	hw->dev_id = cryptodev->data->dev_id;
 	hw->virtio_dev_capabilities = virtio_capabilities;
 
-	VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
-		cryptodev->data->dev_id, pci_dev->id.vendor_id,
-		pci_dev->id.device_id);
+	if (pci_dev) {
+		/* pci device init */
+		VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
+			cryptodev->data->dev_id, pci_dev->id.vendor_id,
+			pci_dev->id.device_id);
 
-	/* pci device init */
-	if (vtpci_cryptodev_init(pci_dev, hw))
+		if (vtpci_cryptodev_init(pci_dev, hw))
+			return -1;
+	}
+
+	if (virtio_crypto_init_device(cryptodev, features) < 0)
 		return -1;
 
-	if (virtio_crypto_init_device(cryptodev,
-			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+	return 0;
+}
+
+/*
+ * This function is based on probe() function
+ * It returns 0 on success.
+ */
+static int
+crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
+		struct rte_cryptodev_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *cryptodev;
+
+	PMD_INIT_FUNC_TRACE();
+
+	cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
+					init_params);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	if (crypto_virtio_dev_init(cryptodev, VIRTIO_CRYPTO_PMD_GUEST_FEATURES,
+			pci_dev) < 0)
 		return -1;
 
 	rte_cryptodev_pmd_probing_finish(cryptodev);
diff --git a/drivers/crypto/virtio/virtio_cryptodev.h b/drivers/crypto/virtio/virtio_cryptodev.h
index b4bdd9800b..95a1e09dca 100644
--- a/drivers/crypto/virtio/virtio_cryptodev.h
+++ b/drivers/crypto/virtio/virtio_cryptodev.h
@@ -74,4 +74,7 @@ uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,
 		struct rte_crypto_op **tx_pkts,
 		uint16_t nb_pkts);
 
+int crypto_virtio_dev_init(struct rte_cryptodev *cryptodev, uint64_t features,
+		struct rte_pci_device *pci_dev);
+
 #endif /* _VIRTIO_CRYPTODEV_H_ */
diff --git a/drivers/crypto/virtio/virtio_pci.h b/drivers/crypto/virtio/virtio_pci.h
index 79945cb88e..c75777e005 100644
--- a/drivers/crypto/virtio/virtio_pci.h
+++ b/drivers/crypto/virtio/virtio_pci.h
@@ -20,6 +20,9 @@ struct virtqueue;
 #define VIRTIO_CRYPTO_PCI_VENDORID 0x1AF4
 #define VIRTIO_CRYPTO_PCI_DEVICEID 0x1054
 
+/* VirtIO device IDs. */
+#define VIRTIO_ID_CRYPTO  20
+
 /* VirtIO ABI version, this must match exactly. */
 #define VIRTIO_PCI_ABI_VERSION 0
 
@@ -56,8 +59,12 @@ struct virtqueue;
 #define VIRTIO_CONFIG_STATUS_DRIVER    0x02
 #define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
 #define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08
+#define VIRTIO_CONFIG_STATUS_DEV_NEED_RESET	0x40
 #define VIRTIO_CONFIG_STATUS_FAILED    0x80
 
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_VRING_ALIGN 4096
+
 /*
  * Each virtqueue indirect descriptor list must be physically contiguous.
  * To allow us to malloc(9) each list individually, limit the number
diff --git a/drivers/crypto/virtio/virtio_ring.h b/drivers/crypto/virtio/virtio_ring.h
index c74d1172b7..4b418f6e60 100644
--- a/drivers/crypto/virtio/virtio_ring.h
+++ b/drivers/crypto/virtio/virtio_ring.h
@@ -181,12 +181,6 @@ vring_init_packed(struct vring_packed *vr, uint8_t *p, rte_iova_t iova,
 				sizeof(struct vring_packed_desc_event)), align);
 }
 
-static inline void
-vring_init(struct vring *vr, unsigned int num, uint8_t *p, unsigned long align)
-{
-	vring_init_split(vr, p, 0, align, num);
-}
-
 /*
  * The following is used with VIRTIO_RING_F_EVENT_IDX.
  * Assuming a given event_idx value from the other size, if we have
diff --git a/drivers/crypto/virtio/virtio_user/vhost_vdpa.c b/drivers/crypto/virtio/virtio_user/vhost_vdpa.c
new file mode 100644
index 0000000000..41696c4095
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_user/vhost_vdpa.c
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Marvell
+ */
+
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <rte_memory.h>
+
+#include "virtio_user/vhost.h"
+#include "virtio_user/vhost_logs.h"
+
+#include "virtio_user_dev.h"
+#include "../virtio_pci.h"
+
+struct vhost_vdpa_data {
+	int vhostfd;
+	uint64_t protocol_features;
+};
+
+#define VHOST_VDPA_SUPPORTED_BACKEND_FEATURES		\
+	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2	|	\
+	1ULL << VHOST_BACKEND_F_IOTLB_BATCH)
+
+/* vhost kernel & vdpa ioctls */
+#define VHOST_VIRTIO 0xAF
+#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
+#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
+#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
+#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
+#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
+#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
+#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
+#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
+#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
+#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
+#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
+#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
+#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
+#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
+#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
+#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
+#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
+#define VHOST_VDPA_GET_CONFIG _IOR(VHOST_VIRTIO, 0x73, struct vhost_vdpa_config)
+#define VHOST_VDPA_SET_CONFIG _IOW(VHOST_VIRTIO, 0x74, struct vhost_vdpa_config)
+#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, struct vhost_vring_state)
+#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
+
+/* no alignment requirement */
+struct vhost_iotlb_msg {
+	uint64_t iova;
+	uint64_t size;
+	uint64_t uaddr;
+#define VHOST_ACCESS_RO      0x1
+#define VHOST_ACCESS_WO      0x2
+#define VHOST_ACCESS_RW      0x3
+	uint8_t perm;
+#define VHOST_IOTLB_MISS           1
+#define VHOST_IOTLB_UPDATE         2
+#define VHOST_IOTLB_INVALIDATE     3
+#define VHOST_IOTLB_ACCESS_FAIL    4
+#define VHOST_IOTLB_BATCH_BEGIN    5
+#define VHOST_IOTLB_BATCH_END      6
+	uint8_t type;
+};
+
+#define VHOST_IOTLB_MSG_V2 0x2
+
+struct vhost_vdpa_config {
+	uint32_t off;
+	uint32_t len;
+	uint8_t buf[];
+};
+
+struct vhost_msg {
+	uint32_t type;
+	uint32_t reserved;
+	union {
+		struct vhost_iotlb_msg iotlb;
+		uint8_t padding[64];
+	};
+};
+
+
+static int
+vhost_vdpa_ioctl(int fd, uint64_t request, void *arg)
+{
+	int ret;
+
+	ret = ioctl(fd, request, arg);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Vhost-vDPA ioctl %"PRIu64" failed (%s)",
+				request, strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+vhost_vdpa_get_protocol_features(struct virtio_user_dev *dev, uint64_t *features)
+{
+	struct vhost_vdpa_data *data = dev->backend_data;
+
+	return vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_BACKEND_FEATURES, features);
+}
+
+static int
+vhost_vdpa_set_protocol_features(struct virtio_user_dev *dev, uint64_t features)
+{
+	struct vhost_vdpa_data *data = dev->backend_data;
+
+	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_BACKEND_FEATURES, &features);
+}
+
+static int
+vhost_vdpa_get_features(struct virtio_user_dev *dev, uint64_t *features)
+{
+	struct vhost_vdpa_data *data = dev->backend_data;
+	int ret;
+
+	ret = vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_FEATURES, features);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get features");
+		return -1;
+	}
+
+	/* Negotiated vDPA backend features */
+	ret = vhost_vdpa_get_protocol_features(dev, &data->protocol_features);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to get backend features");
+		return -1;
+	}
+
+	data->protocol_features &= VHOST_VDPA_SUPPORTED_BACKEND_FEATURES;
+
+	ret = vhost_vdpa_set_protocol_features(dev, data->protocol_features);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to set backend features");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+vhost_vdpa_set_vring_enable(struct virtio_user_dev *dev, struct vhost_vring_state *state)
+{
+	struct vhost_vdpa_data *data = dev->backend_data;
+
+	return vhost_vdpa_ioctl(data->vhostfd, VHOST_VDPA_SET_VRING_ENABLE, state);
+}
+
+/**
+ * Set up environment to talk with a vhost vdpa backend.
+ *
+ * @return
+ *   - (-1) on setup failure;
+ *   - (>=0) on success.
+ */
+static int
+vhost_vdpa_setup(struct virtio_user_dev *dev)
+{
+	struct vhost_vdpa_data *data;
+	uint32_t did = (uint32_t)-1;
+
+	data = malloc(sizeof(*data));
+	if (!data) {
+		PMD_DRV_LOG(ERR, "(%s) Faidle to allocate backend data", dev->path);
+		return -1;
+	}
+
+	data->vhostfd = open(dev->path, O_RDWR);
+	if (data->vhostfd < 0) {
+		PMD_DRV_LOG(ERR, "Failed to open %s: %s",
+				dev->path, strerror(errno));
+		free(data);
+		return -1;
+	}
+
+	if (ioctl(data->vhostfd, VHOST_VDPA_GET_DEVICE_ID, &did) < 0 ||
+			did != VIRTIO_ID_CRYPTO) {
+		PMD_DRV_LOG(ERR, "Invalid vdpa device ID: %u", did);
+		close(data->vhostfd);
+		free(data);
+		return -1;
+	}
+
+	dev->backend_data = data;
+
+	return 0;
+}
+
+static int
+vhost_vdpa_cvq_enable(struct virtio_user_dev *dev, int enable)
+{
+	struct vhost_vring_state state = {
+		.index = dev->max_queue_pairs,
+		.num   = enable,
+	};
+
+	return vhost_vdpa_set_vring_enable(dev, &state);
+}
+
+static int
+vhost_vdpa_enable_queue_pair(struct virtio_user_dev *dev,
+				uint16_t pair_idx,
+				int enable)
+{
+	struct vhost_vring_state state = {
+		.index = pair_idx,
+		.num   = enable,
+	};
+
+	if (dev->qp_enabled[pair_idx] == enable)
+		return 0;
+
+	if (vhost_vdpa_set_vring_enable(dev, &state))
+		return -1;
+
+	dev->qp_enabled[pair_idx] = enable;
+	return 0;
+}
+
+static int
+vhost_vdpa_update_link_state(struct virtio_user_dev *dev)
+{
+	/* TODO: this is a workaround until there is a cleaner way to query the crypto device status */
+	dev->crypto_status = VIRTIO_CRYPTO_S_HW_READY;
+	return 0;
+}
+
+static int
+vhost_vdpa_get_nr_vrings(struct virtio_user_dev *dev)
+{
+	int nr_vrings = dev->max_queue_pairs;
+
+	return nr_vrings;
+}
+
+static int
+vhost_vdpa_unmap_notification_area(struct virtio_user_dev *dev)
+{
+	int i, nr_vrings;
+
+	nr_vrings = vhost_vdpa_get_nr_vrings(dev);
+
+	for (i = 0; i < nr_vrings; i++) {
+		if (dev->notify_area[i])
+			munmap(dev->notify_area[i], getpagesize());
+	}
+	free(dev->notify_area);
+	dev->notify_area = NULL;
+
+	return 0;
+}
+
+static int
+vhost_vdpa_map_notification_area(struct virtio_user_dev *dev)
+{
+	struct vhost_vdpa_data *data = dev->backend_data;
+	int nr_vrings, i, page_size = getpagesize();
+	uint16_t **notify_area;
+
+	nr_vrings = vhost_vdpa_get_nr_vrings(dev);
+
+	/* CQ is another vring */
+	nr_vrings++;
+
+	notify_area = malloc(nr_vrings * sizeof(*notify_area));
+	if (!notify_area) {
+		PMD_DRV_LOG(ERR, "(%s) Failed to allocate notify area array", dev->path);
+		return -1;
+	}
+
+	for (i = 0; i < nr_vrings; i++) {
+		notify_area[i] = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED | MAP_FILE,
+					data->vhostfd, i * page_size);
+		if (notify_area[i] == MAP_FAILED) {
+			PMD_DRV_LOG(ERR, "(%s) Map failed for notify address of queue %d",
+					dev->path, i);
+			i--;
+			goto map_err;
+		}
+	}
+	dev->notify_area = notify_area;
+
+	return 0;
+
+map_err:
+	for (; i >= 0; i--)
+		munmap(notify_area[i], page_size);
+	free(notify_area);
+
+	return -1;
+}
+
+struct virtio_user_backend_ops virtio_crypto_ops_vdpa = {
+	.setup = vhost_vdpa_setup,
+	.get_features = vhost_vdpa_get_features,
+	.cvq_enable = vhost_vdpa_cvq_enable,
+	.enable_qp = vhost_vdpa_enable_queue_pair,
+	.update_link_state = vhost_vdpa_update_link_state,
+	.map_notification_area = vhost_vdpa_map_notification_area,
+	.unmap_notification_area = vhost_vdpa_unmap_notification_area,
+};
diff --git a/drivers/crypto/virtio/virtio_user/virtio_user_dev.c b/drivers/crypto/virtio/virtio_user/virtio_user_dev.c
new file mode 100644
index 0000000000..ac53ca78d4
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_user/virtio_user_dev.c
@@ -0,0 +1,776 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Marvell.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <sys/eventfd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <pthread.h>
+
+#include <rte_alarm.h>
+#include <rte_string_fns.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_io.h>
+
+#include "virtio_user/vhost.h"
+#include "virtio_user/vhost_logs.h"
+#include "virtio_logs.h"
+
+#include "cryptodev_pmd.h"
+#include "virtio_crypto.h"
+#include "virtio_cvq.h"
+#include "virtio_user_dev.h"
+#include "virtqueue.h"
+
+#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"
+
+const char * const crypto_virtio_user_backend_strings[] = {
+	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
+	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
+};
+
+static int
+virtio_user_uninit_notify_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+	if (dev->kickfds[queue_sel] >= 0) {
+		close(dev->kickfds[queue_sel]);
+		dev->kickfds[queue_sel] = -1;
+	}
+
+	if (dev->callfds[queue_sel] >= 0) {
+		close(dev->callfds[queue_sel]);
+		dev->callfds[queue_sel] = -1;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_init_notify_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+	/* We could use an invalid flag here, but some backends use the
+	 * kickfd and callfd as criteria to judge whether the device is
+	 * alive, so use real eventfds.
+	 */
+	dev->callfds[queue_sel] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+	if (dev->callfds[queue_sel] < 0) {
+		PMD_DRV_LOG(ERR, "(%s) Failed to setup callfd for queue %u: %s",
+				dev->path, queue_sel, strerror(errno));
+		return -1;
+	}
+	dev->kickfds[queue_sel] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+	if (dev->kickfds[queue_sel] < 0) {
+		PMD_DRV_LOG(ERR, "(%s) Failed to setup kickfd for queue %u: %s",
+				dev->path, queue_sel, strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_destroy_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+	struct vhost_vring_state state;
+	int ret;
+
+	state.index = queue_sel;
+	ret = dev->ops->get_vring_base(dev, &state);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "(%s) Failed to destroy queue %u", dev->path, queue_sel);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+	/* Of all per-virtqueue MSGs, make sure VHOST_SET_VRING_CALL comes
+	 * first, because vhost depends on this msg to allocate the
+	 * virtqueue pair.
+	 */
+	struct vhost_vring_file file;
+	int ret;
+
+	file.index = queue_sel;
+	file.fd = dev->callfds[queue_sel];
+	ret = dev->ops->set_vring_call(dev, &file);
+	if (ret < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to create queue %u", dev->path, queue_sel);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+	int ret;
+	struct vhost_vring_file file;
+	struct vhost_vring_state state;
+	struct vring *vring = &dev->vrings.split[queue_sel];
+	struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
+	uint64_t desc_addr, avail_addr, used_addr;
+	struct vhost_vring_addr addr = {
+		.index = queue_sel,
+		.log_guest_addr = 0,
+		.flags = 0, /* disable log */
+	};
+
+	if (queue_sel == dev->max_queue_pairs) {
+		if (!dev->scvq) {
+			PMD_INIT_LOG(ERR, "(%s) Shadow control queue expected but missing",
+					dev->path);
+			goto err;
+		}
+
+		/* Use shadow control queue information */
+		vring = &dev->scvq->vq_split.ring;
+		pq_vring = &dev->scvq->vq_packed.ring;
+	}
+
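+	/* The ring memory is laid out as: descriptor table first, then the
+	 * driver (avail) area, then the device (used) area aligned to
+	 * VIRTIO_VRING_ALIGN; the addresses computed below follow that layout.
+	 */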
+	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
+		desc_addr = pq_vring->desc_iova;
+		avail_addr = desc_addr + pq_vring->num * sizeof(struct vring_packed_desc);
+		used_addr =  RTE_ALIGN_CEIL(avail_addr + sizeof(struct vring_packed_desc_event),
+						VIRTIO_VRING_ALIGN);
+
+		addr.desc_user_addr = desc_addr;
+		addr.avail_user_addr = avail_addr;
+		addr.used_user_addr = used_addr;
+	} else {
+		desc_addr = vring->desc_iova;
+		avail_addr = desc_addr + vring->num * sizeof(struct vring_desc);
+		used_addr = RTE_ALIGN_CEIL((uintptr_t)(&vring->avail->ring[vring->num]),
+					VIRTIO_VRING_ALIGN);
+
+		addr.desc_user_addr = desc_addr;
+		addr.avail_user_addr = avail_addr;
+		addr.used_user_addr = used_addr;
+	}
+
+	state.index = queue_sel;
+	state.num = vring->num;
+	ret = dev->ops->set_vring_num(dev, &state);
+	if (ret < 0)
+		goto err;
+
+	state.index = queue_sel;
+	state.num = 0; /* no reservation */
+	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
+		state.num |= (1 << 15);
+	ret = dev->ops->set_vring_base(dev, &state);
+	if (ret < 0)
+		goto err;
+
+	ret = dev->ops->set_vring_addr(dev, &addr);
+	if (ret < 0)
+		goto err;
+
+	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
+	 * comes last, because vhost depends on this message to judge if
+	 * virtio is ready.
+	 */
+	file.index = queue_sel;
+	file.fd = dev->kickfds[queue_sel];
+	ret = dev->ops->set_vring_kick(dev, &file);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+err:
+	PMD_INIT_LOG(ERR, "(%s) Failed to kick queue %u", dev->path, queue_sel);
+
+	return -1;
+}
+
+static int
+virtio_user_foreach_queue(struct virtio_user_dev *dev,
+			int (*fn)(struct virtio_user_dev *, uint32_t))
+{
+	uint32_t i, nr_vq;
+
+	nr_vq = dev->max_queue_pairs;
+
+	for (i = 0; i < nr_vq; i++)
+		if (fn(dev, i) < 0)
+			return -1;
+
+	return 0;
+}
+
+int
+crypto_virtio_user_dev_set_features(struct virtio_user_dev *dev)
+{
+	uint64_t features;
+	int ret = -1;
+
+	pthread_mutex_lock(&dev->mutex);
+
+	/* Step 0: tell vhost to create queues */
+	if (virtio_user_foreach_queue(dev, virtio_user_create_queue) < 0)
+		goto error;
+
+	features = dev->features;
+
+	ret = dev->ops->set_features(dev, features);
+	if (ret < 0)
+		goto error;
+	PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
+error:
+	pthread_mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+int
+crypto_virtio_user_start_device(struct virtio_user_dev *dev)
+{
+	int ret;
+
+	/*
+	 * XXX workaround!
+	 *
+	 * We need to make sure that the locks will be
+	 * taken in the correct order to avoid deadlocks.
+	 *
+	 * Before releasing this lock, this thread should
+	 * not trigger any memory hotplug events.
+	 *
+	 * This is a temporary workaround, and should be
+	 * replaced when we get proper support from the
+	 * memory subsystem in the future.
+	 */
+	rte_mcfg_mem_read_lock();
+	pthread_mutex_lock(&dev->mutex);
+
+	/* Step 2: share memory regions */
+	ret = dev->ops->set_memory_table(dev);
+	if (ret < 0)
+		goto error;
+
+	/* Step 3: kick queues */
+	ret = virtio_user_foreach_queue(dev, virtio_user_kick_queue);
+	if (ret < 0)
+		goto error;
+
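+	/* The shadow control queue sits at index max_queue_pairs. */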
+	ret = virtio_user_kick_queue(dev, dev->max_queue_pairs);
+	if (ret < 0)
+		goto error;
+
+	/* Step 4: enable queues */
+	for (int i = 0; i < dev->max_queue_pairs; i++) {
+		ret = dev->ops->enable_qp(dev, i, 1);
+		if (ret < 0)
+			goto error;
+	}
+
+	dev->started = true;
+
+	pthread_mutex_unlock(&dev->mutex);
+	rte_mcfg_mem_read_unlock();
+
+	return 0;
+error:
+	pthread_mutex_unlock(&dev->mutex);
+	rte_mcfg_mem_read_unlock();
+
+	PMD_INIT_LOG(ERR, "(%s) Failed to start device", dev->path);
+
+	/* TODO: free resource here or caller to check */
+	return -1;
+}
+
+int crypto_virtio_user_stop_device(struct virtio_user_dev *dev)
+{
+	uint32_t i;
+	int ret;
+
+	pthread_mutex_lock(&dev->mutex);
+	if (!dev->started)
+		goto out;
+
+	for (i = 0; i < dev->max_queue_pairs; ++i) {
+		ret = dev->ops->enable_qp(dev, i, 0);
+		if (ret < 0)
+			goto err;
+	}
+
+	if (dev->scvq) {
+		ret = dev->ops->cvq_enable(dev, 0);
+		if (ret < 0)
+			goto err;
+	}
+
+	/* Stop the backend. */
+	if (virtio_user_foreach_queue(dev, virtio_user_destroy_queue) < 0)
+		goto err;
+
+	dev->started = false;
+
+out:
+	pthread_mutex_unlock(&dev->mutex);
+
+	return 0;
+err:
+	pthread_mutex_unlock(&dev->mutex);
+
+	PMD_INIT_LOG(ERR, "(%s) Failed to stop device", dev->path);
+
+	return -1;
+}
+
+static int
+virtio_user_dev_init_max_queue_pairs(struct virtio_user_dev *dev, uint32_t user_max_qp)
+{
+	int ret;
+
+	if (!dev->ops->get_config) {
+		dev->max_queue_pairs = user_max_qp;
+		return 0;
+	}
+
+	ret = dev->ops->get_config(dev, (uint8_t *)&dev->max_queue_pairs,
+			offsetof(struct virtio_crypto_config, max_dataqueues),
+			sizeof(uint16_t));
+	if (ret) {
+		/*
+		 * We need to know the max number of queue pairs from the
+		 * device so that the control queue gets the right index.
+		 */
+		dev->max_queue_pairs = 1;
+		PMD_DRV_LOG(ERR, "(%s) Failed to get max queue pairs from device", dev->path);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_dev_init_cipher_services(struct virtio_user_dev *dev)
+{
+	struct virtio_crypto_config config;
+	int ret;
+
+	dev->crypto_services = RTE_BIT32(VIRTIO_CRYPTO_SERVICE_CIPHER);
+	dev->cipher_algo = 0;
+	dev->auth_algo = 0;
+	dev->akcipher_algo = 0;
+
+	if (!dev->ops->get_config)
+		return 0;
+
+	ret = dev->ops->get_config(dev, (uint8_t *)&config, 0, sizeof(config));
+	if (ret) {
+		PMD_DRV_LOG(ERR, "(%s) Failed to get crypto config from device", dev->path);
+		return ret;
+	}
+
+	dev->crypto_services = config.crypto_services;
+	dev->cipher_algo = ((uint64_t)config.cipher_algo_h << 32) |
+						config.cipher_algo_l;
+	dev->hash_algo = config.hash_algo;
+	dev->auth_algo = ((uint64_t)config.mac_algo_h << 32) |
+						config.mac_algo_l;
+	dev->aead_algo = config.aead_algo;
+	dev->akcipher_algo = config.akcipher_algo;
+	return 0;
+}
+
+static int
+virtio_user_dev_init_notify(struct virtio_user_dev *dev)
+{
+	if (virtio_user_foreach_queue(dev, virtio_user_init_notify_queue) < 0)
+		goto err;
+
+	if (dev->device_features & (1ULL << VIRTIO_F_NOTIFICATION_DATA))
+		if (dev->ops->map_notification_area &&
+				dev->ops->map_notification_area(dev))
+			goto err;
+
+	return 0;
+err:
+	virtio_user_foreach_queue(dev, virtio_user_uninit_notify_queue);
+
+	return -1;
+}
+
+static void
+virtio_user_dev_uninit_notify(struct virtio_user_dev *dev)
+{
+	virtio_user_foreach_queue(dev, virtio_user_uninit_notify_queue);
+
+	if (dev->ops->unmap_notification_area && dev->notify_area)
+		dev->ops->unmap_notification_area(dev);
+}
+
+static void
+virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
+			const void *addr,
+			size_t len __rte_unused,
+			void *arg)
+{
+	struct virtio_user_dev *dev = arg;
+	struct rte_memseg_list *msl;
+	uint16_t i;
+	int ret = 0;
+
+	/* ignore externally allocated memory */
+	msl = rte_mem_virt2memseg_list(addr);
+	if (msl->external)
+		return;
+
+	pthread_mutex_lock(&dev->mutex);
+
+	if (dev->started == false)
+		goto exit;
+
+	/* Step 1: pause the active queues */
+	for (i = 0; i < dev->queue_pairs; i++) {
+		ret = dev->ops->enable_qp(dev, i, 0);
+		if (ret < 0)
+			goto exit;
+	}
+
+	/* Step 2: update memory regions */
+	ret = dev->ops->set_memory_table(dev);
+	if (ret < 0)
+		goto exit;
+
+	/* Step 3: resume the active queues */
+	for (i = 0; i < dev->queue_pairs; i++) {
+		ret = dev->ops->enable_qp(dev, i, 1);
+		if (ret < 0)
+			goto exit;
+	}
+
+exit:
+	pthread_mutex_unlock(&dev->mutex);
+
+	if (ret < 0)
+		PMD_DRV_LOG(ERR, "(%s) Failed to update memory table", dev->path);
+}
+
+static int
+virtio_user_dev_setup(struct virtio_user_dev *dev)
+{
+	if (dev->is_server) {
+		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
+			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
+			return -1;
+		}
+	}
+
+	switch (dev->backend_type) {
+	case VIRTIO_USER_BACKEND_VHOST_VDPA:
+		dev->ops = &virtio_ops_vdpa;
+		dev->ops->setup = virtio_crypto_ops_vdpa.setup;
+		dev->ops->get_features = virtio_crypto_ops_vdpa.get_features;
+		dev->ops->cvq_enable = virtio_crypto_ops_vdpa.cvq_enable;
+		dev->ops->enable_qp = virtio_crypto_ops_vdpa.enable_qp;
+		dev->ops->update_link_state = virtio_crypto_ops_vdpa.update_link_state;
+		dev->ops->map_notification_area = virtio_crypto_ops_vdpa.map_notification_area;
+		dev->ops->unmap_notification_area = virtio_crypto_ops_vdpa.unmap_notification_area;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path);
+		return -1;
+	}
+
+	if (dev->ops->setup(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to setup backend", dev->path);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+virtio_user_alloc_vrings(struct virtio_user_dev *dev)
+{
+	int i, size, nr_vrings;
+	bool packed_ring = !!(dev->device_features & (1ull << VIRTIO_F_RING_PACKED));
+
+	nr_vrings = dev->max_queue_pairs + 1;
+
+	dev->callfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->callfds), 0);
+	if (!dev->callfds) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc callfds", dev->path);
+		return -1;
+	}
+
+	dev->kickfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->kickfds), 0);
+	if (!dev->kickfds) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc kickfds", dev->path);
+		goto free_callfds;
+	}
+
+	for (i = 0; i < nr_vrings; i++) {
+		dev->callfds[i] = -1;
+		dev->kickfds[i] = -1;
+	}
+
+	if (packed_ring)
+		size = sizeof(*dev->vrings.packed);
+	else
+		size = sizeof(*dev->vrings.split);
+	dev->vrings.ptr = rte_zmalloc("virtio_user_dev", nr_vrings * size, 0);
+	if (!dev->vrings.ptr) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc vrings metadata", dev->path);
+		goto free_kickfds;
+	}
+
+	if (packed_ring) {
+		dev->packed_queues = rte_zmalloc("virtio_user_dev",
+				nr_vrings * sizeof(*dev->packed_queues), 0);
+		if (!dev->packed_queues) {
+			PMD_INIT_LOG(ERR, "(%s) Failed to alloc packed queues metadata",
+					dev->path);
+			goto free_vrings;
+		}
+	}
+
+	dev->qp_enabled = rte_zmalloc("virtio_user_dev",
+			nr_vrings * sizeof(*dev->qp_enabled), 0);
+	if (!dev->qp_enabled) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc QP enable states", dev->path);
+		goto free_packed_queues;
+	}
+
+	return 0;
+
+free_packed_queues:
+	rte_free(dev->packed_queues);
+	dev->packed_queues = NULL;
+free_vrings:
+	rte_free(dev->vrings.ptr);
+	dev->vrings.ptr = NULL;
+free_kickfds:
+	rte_free(dev->kickfds);
+	dev->kickfds = NULL;
+free_callfds:
+	rte_free(dev->callfds);
+	dev->callfds = NULL;
+
+	return -1;
+}
+
+static void
+virtio_user_free_vrings(struct virtio_user_dev *dev)
+{
+	rte_free(dev->qp_enabled);
+	dev->qp_enabled = NULL;
+	rte_free(dev->packed_queues);
+	dev->packed_queues = NULL;
+	rte_free(dev->vrings.ptr);
+	dev->vrings.ptr = NULL;
+	rte_free(dev->kickfds);
+	dev->kickfds = NULL;
+	rte_free(dev->callfds);
+	dev->callfds = NULL;
+}
+
+#define VIRTIO_USER_SUPPORTED_FEATURES   \
+	(1ULL << VIRTIO_CRYPTO_SERVICE_CIPHER     | \
+	 1ULL << VIRTIO_CRYPTO_SERVICE_HASH       | \
+	 1ULL << VIRTIO_CRYPTO_SERVICE_AKCIPHER   | \
+	 1ULL << VIRTIO_F_VERSION_1               | \
+	 1ULL << VIRTIO_F_IN_ORDER                | \
+	 1ULL << VIRTIO_F_RING_PACKED             | \
+	 1ULL << VIRTIO_F_NOTIFICATION_DATA       | \
+	 1ULL << VIRTIO_F_ORDER_PLATFORM)
+
+int
+crypto_virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
+			int queue_size, int server)
+{
+	uint64_t backend_features;
+
+	pthread_mutex_init(&dev->mutex, NULL);
+	strlcpy(dev->path, path, PATH_MAX);
+
+	dev->started = 0;
+	dev->queue_pairs = 1; /* mq disabled by default */
+	dev->max_queue_pairs = queues; /* user requested value, used if the backend cannot report it */
+	dev->queue_size = queue_size;
+	dev->is_server = server;
+	dev->frontend_features = 0;
+	dev->unsupported_features = 0;
+	dev->backend_type = VIRTIO_USER_BACKEND_VHOST_VDPA;
+	dev->hw.modern = 1;
+
+	if (virtio_user_dev_setup(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) backend set up fails", dev->path);
+		return -1;
+	}
+
+	if (dev->ops->set_owner(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path);
+		goto destroy;
+	}
+
+	if (dev->ops->get_backend_features(&backend_features) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path);
+		goto destroy;
+	}
+
+	dev->unsupported_features = ~(VIRTIO_USER_SUPPORTED_FEATURES | backend_features);
+
+	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to get device features", dev->path);
+		goto destroy;
+	}
+
+	if (virtio_user_dev_init_max_queue_pairs(dev, queues)) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to get max queue pairs", dev->path);
+		goto destroy;
+	}
+
+	if (virtio_user_dev_init_cipher_services(dev)) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to get cipher services", dev->path);
+		goto destroy;
+	}
+
+	dev->frontend_features &= ~dev->unsupported_features;
+	dev->device_features &= ~dev->unsupported_features;
+
+	if (virtio_user_alloc_vrings(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to allocate vring metadata", dev->path);
+		goto destroy;
+	}
+
+	if (virtio_user_dev_init_notify(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path);
+		goto free_vrings;
+	}
+
+	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
+				virtio_user_mem_event_cb, dev)) {
+		if (rte_errno != ENOTSUP) {
+			PMD_INIT_LOG(ERR, "(%s) Failed to register mem event callback",
+					dev->path);
+			goto notify_uninit;
+		}
+	}
+
+	return 0;
+
+notify_uninit:
+	virtio_user_dev_uninit_notify(dev);
+free_vrings:
+	virtio_user_free_vrings(dev);
+destroy:
+	dev->ops->destroy(dev);
+
+	return -1;
+}
+
+void
+crypto_virtio_user_dev_uninit(struct virtio_user_dev *dev)
+{
+	crypto_virtio_user_stop_device(dev);
+
+	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);
+
+	virtio_user_dev_uninit_notify(dev);
+
+	virtio_user_free_vrings(dev);
+
+	if (dev->is_server)
+		unlink(dev->path);
+
+	dev->ops->destroy(dev);
+}
+
+#define CVQ_MAX_DATA_DESCS 32
+
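+/* With IOVA as VA, the IOVA is the process virtual address itself; otherwise
+ * translate the IOVA (a physical address) back to a virtual address.
+ */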
+static inline void *
+virtio_user_iova2virt(struct virtio_user_dev *dev __rte_unused, rte_iova_t iova)
+{
+	if (rte_eal_iova_mode() == RTE_IOVA_VA)
+		return (void *)(uintptr_t)iova;
+	else
+		return rte_mem_iova2virt(iova);
+}
+
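+/* A packed-ring descriptor is available when its AVAIL flag matches the
+ * driver's wrap counter while its USED flag does not; the acquire load pairs
+ * with the device's release store of the flags.
+ */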
+static inline int
+desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
+{
+	uint16_t flags = rte_atomic_load_explicit(&desc->flags, rte_memory_order_acquire);
+
+	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
+		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
+}
+
+int
+crypto_virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
+{
+	int ret;
+
+	pthread_mutex_lock(&dev->mutex);
+	dev->status = status;
+	ret = dev->ops->set_status(dev, status);
+	if (ret && ret != -ENOTSUP)
+		PMD_INIT_LOG(ERR, "(%s) Failed to set backend status", dev->path);
+
+	pthread_mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+int
+crypto_virtio_user_dev_update_status(struct virtio_user_dev *dev)
+{
+	int ret;
+	uint8_t status;
+
+	pthread_mutex_lock(&dev->mutex);
+
+	ret = dev->ops->get_status(dev, &status);
+	if (!ret) {
+		dev->status = status;
+		PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):"
+			"\t-RESET: %u "
+			"\t-ACKNOWLEDGE: %u "
+			"\t-DRIVER: %u "
+			"\t-DRIVER_OK: %u "
+			"\t-FEATURES_OK: %u "
+			"\t-DEVICE_NEED_RESET: %u "
+			"\t-FAILED: %u",
+			dev->status,
+			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
+	} else if (ret != -ENOTSUP) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to get backend status", dev->path);
+	}
+
+	pthread_mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+int
+crypto_virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
+{
+	if (dev->ops->update_link_state)
+		return dev->ops->update_link_state(dev);
+
+	return 0;
+}
diff --git a/drivers/crypto/virtio/virtio_user/virtio_user_dev.h b/drivers/crypto/virtio/virtio_user/virtio_user_dev.h
new file mode 100644
index 0000000000..ef648fd14b
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_user/virtio_user_dev.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Marvell.
+ */
+
+#ifndef _VIRTIO_USER_DEV_H
+#define _VIRTIO_USER_DEV_H
+
+#include <limits.h>
+#include <stdbool.h>
+
+#include "../virtio_pci.h"
+#include "../virtio_ring.h"
+
+extern struct virtio_user_backend_ops virtio_crypto_ops_vdpa;
+
+enum virtio_user_backend_type {
+	VIRTIO_USER_BACKEND_UNKNOWN,
+	VIRTIO_USER_BACKEND_VHOST_USER,
+	VIRTIO_USER_BACKEND_VHOST_VDPA,
+};
+
+struct virtio_user_queue {
+	uint16_t used_idx;
+	bool avail_wrap_counter;
+	bool used_wrap_counter;
+};
+
+struct virtio_user_dev {
+	union {
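+		/* dummy pads the union to a fixed size, so the offsets of the
+		 * fields below do not depend on sizeof(struct virtio_crypto_hw).
+		 */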
+		struct virtio_crypto_hw hw;
+		uint8_t dummy[256];
+	};
+
+	void		*backend_data;
+	uint16_t	**notify_area;
+	char		path[PATH_MAX];
+	bool		hw_cvq;
+	uint16_t	max_queue_pairs;
+	uint64_t	device_features; /* supported features by device */
+	bool		*qp_enabled;
+
+	enum virtio_user_backend_type backend_type;
+	bool		is_server;  /* server or client mode */
+
+	int		*callfds;
+	int		*kickfds;
+	uint16_t	queue_pairs;
+	uint32_t	queue_size;
+	uint64_t	features; /* the features negotiated with the driver,
+				   * which will be synced with the device
+				   */
+	uint64_t	frontend_features; /* enabled frontend features */
+	uint64_t	unsupported_features; /* unsupported features mask */
+	uint8_t		status;
+	uint32_t	crypto_status;
+	uint32_t	crypto_services;
+	uint64_t	cipher_algo;
+	uint32_t	hash_algo;
+	uint64_t	auth_algo;
+	uint32_t	aead_algo;
+	uint32_t	akcipher_algo;
+
+	union {
+		void			*ptr;
+		struct vring		*split;
+		struct vring_packed	*packed;
+	} vrings;
+
+	struct virtio_user_queue *packed_queues;
+
+	struct virtio_user_backend_ops *ops;
+	pthread_mutex_t	mutex;
+	bool		started;
+
+	struct virtqueue	*scvq;
+};
+
+int crypto_virtio_user_dev_set_features(struct virtio_user_dev *dev);
+int crypto_virtio_user_start_device(struct virtio_user_dev *dev);
+int crypto_virtio_user_stop_device(struct virtio_user_dev *dev);
+int crypto_virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
+			int queue_size, int server);
+void crypto_virtio_user_dev_uninit(struct virtio_user_dev *dev);
+int crypto_virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status);
+int crypto_virtio_user_dev_update_status(struct virtio_user_dev *dev);
+int crypto_virtio_user_dev_update_link_state(struct virtio_user_dev *dev);
+extern const char * const crypto_virtio_user_backend_strings[];
+#endif
diff --git a/drivers/crypto/virtio/virtio_user_cryptodev.c b/drivers/crypto/virtio/virtio_user_cryptodev.c
new file mode 100644
index 0000000000..606639b872
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_user_cryptodev.c
@@ -0,0 +1,587 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Marvell
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <rte_malloc.h>
+#include <rte_kvargs.h>
+#include <bus_vdev_driver.h>
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include <rte_alarm.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+
+#include "virtio_user/virtio_user_dev.h"
+#include "virtio_user/vhost.h"
+#include "virtio_user/vhost_logs.h"
+#include "virtio_cryptodev.h"
+#include "virtio_logs.h"
+#include "virtio_pci.h"
+#include "virtqueue.h"
+
+#define virtio_user_get_dev(hwp) container_of(hwp, struct virtio_user_dev, hw)
+
+static void
+virtio_user_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+		     void *dst, int length __rte_unused)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	if (offset == offsetof(struct virtio_crypto_config, status)) {
+		crypto_virtio_user_dev_update_link_state(dev);
+		*(uint32_t *)dst = dev->crypto_status;
+	} else if (offset == offsetof(struct virtio_crypto_config, max_dataqueues))
+		*(uint16_t *)dst = dev->max_queue_pairs;
+	else if (offset == offsetof(struct virtio_crypto_config, crypto_services))
+		*(uint32_t *)dst = dev->crypto_services;
+	else if (offset == offsetof(struct virtio_crypto_config, cipher_algo_l))
+		*(uint32_t *)dst = dev->cipher_algo & 0xFFFFFFFF;
+	else if (offset == offsetof(struct virtio_crypto_config, cipher_algo_h))
+		*(uint32_t *)dst = dev->cipher_algo >> 32;
+	else if (offset == offsetof(struct virtio_crypto_config, hash_algo))
+		*(uint32_t *)dst = dev->hash_algo;
+	else if (offset == offsetof(struct virtio_crypto_config, mac_algo_l))
+		*(uint32_t *)dst = dev->auth_algo & 0xFFFFFFFF;
+	else if (offset == offsetof(struct virtio_crypto_config, mac_algo_h))
+		*(uint32_t *)dst = dev->auth_algo >> 32;
+	else if (offset == offsetof(struct virtio_crypto_config, aead_algo))
+		*(uint32_t *)dst = dev->aead_algo;
+	else if (offset == offsetof(struct virtio_crypto_config, akcipher_algo))
+		*(uint32_t *)dst = dev->akcipher_algo;
+}
+
+static void
+virtio_user_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+		      const void *src, int length)
+{
+	RTE_SET_USED(hw);
+	RTE_SET_USED(src);
+
+	PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
+		    offset, length);
+}
+
+static void
+virtio_user_reset(struct virtio_crypto_hw *hw)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
+		crypto_virtio_user_stop_device(dev);
+}
+
+static void
+virtio_user_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	uint8_t old_status = dev->status;
+
+	if (status & VIRTIO_CONFIG_STATUS_FEATURES_OK &&
+			~old_status & VIRTIO_CONFIG_STATUS_FEATURES_OK) {
+		crypto_virtio_user_dev_set_features(dev);
+		/* Feature negotiation should only be done at probe time.
+		 * So we skip any further requests here.
+		 */
+		dev->status |= VIRTIO_CONFIG_STATUS_FEATURES_OK;
+	}
+
+	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) {
+		if (crypto_virtio_user_start_device(dev)) {
+			crypto_virtio_user_dev_update_status(dev);
+			return;
+		}
+	} else if (status == VIRTIO_CONFIG_STATUS_RESET) {
+		virtio_user_reset(hw);
+	}
+
+	crypto_virtio_user_dev_set_status(dev, status);
+	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK && dev->scvq) {
+		if (dev->ops->cvq_enable(dev, 1) < 0) {
+			PMD_INIT_LOG(ERR, "(%s) Failed to start ctrlq", dev->path);
+			crypto_virtio_user_dev_update_status(dev);
+			return;
+		}
+	}
+}
+
+static uint8_t
+virtio_user_get_status(struct virtio_crypto_hw *hw)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	crypto_virtio_user_dev_update_status(dev);
+
+	return dev->status;
+}
+
+#define VIRTIO_USER_CRYPTO_PMD_GUEST_FEATURES   \
+	(1ULL << VIRTIO_CRYPTO_SERVICE_CIPHER     | \
+	 1ULL << VIRTIO_CRYPTO_SERVICE_AKCIPHER   | \
+	 1ULL << VIRTIO_F_VERSION_1               | \
+	 1ULL << VIRTIO_F_IN_ORDER                | \
+	 1ULL << VIRTIO_F_RING_PACKED             | \
+	 1ULL << VIRTIO_F_NOTIFICATION_DATA       | \
+	 1ULL << VIRTIO_RING_F_INDIRECT_DESC      | \
+	 1ULL << VIRTIO_F_ORDER_PLATFORM)
+
+static uint64_t
+virtio_user_get_features(struct virtio_crypto_hw *hw)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	/* unmask feature bits defined in vhost user protocol */
+	return (dev->device_features | dev->frontend_features) &
+		VIRTIO_USER_CRYPTO_PMD_GUEST_FEATURES;
+}
+
+static void
+virtio_user_set_features(struct virtio_crypto_hw *hw, uint64_t features)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	dev->features = features & (dev->device_features | dev->frontend_features);
+}
+
+static uint8_t
+virtio_user_get_isr(struct virtio_crypto_hw *hw __rte_unused)
+{
+	/* Queue interrupts and the config interrupt are separated in
+	 * virtio-user, so here we only report a config change.
+	 */
+	return VIRTIO_PCI_CAP_ISR_CFG;
+}
+
+static uint16_t
+virtio_user_set_config_irq(struct virtio_crypto_hw *hw __rte_unused,
+		    uint16_t vec __rte_unused)
+{
+	return 0;
+}
+
+static uint16_t
+virtio_user_set_queue_irq(struct virtio_crypto_hw *hw __rte_unused,
+			  struct virtqueue *vq __rte_unused,
+			  uint16_t vec)
+{
+	/* pretend we have done that */
+	return vec;
+}
+
+/* This function returns the queue size, i.e. the number of descriptors, of a
+ * specified queue. It is different from VHOST_USER_GET_QUEUE_NUM, which is
+ * used to get the max number of supported queues.
+ */
+static uint16_t
+virtio_user_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id __rte_unused)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	/* Currently, each queue has the same queue size */
+	return dev->queue_size;
+}
+
+static void
+virtio_user_setup_queue_packed(struct virtqueue *vq,
+			       struct virtio_user_dev *dev)
+{
+	uint16_t queue_idx = vq->vq_queue_index;
+	struct vring_packed *vring;
+	uint64_t desc_addr;
+	uint64_t avail_addr;
+	uint64_t used_addr;
+	uint16_t i;
+
+	vring  = &dev->vrings.packed[queue_idx];
+	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
+	avail_addr = desc_addr + vq->vq_nentries *
+		sizeof(struct vring_packed_desc);
+	used_addr = RTE_ALIGN_CEIL(avail_addr +
+			   sizeof(struct vring_packed_desc_event),
+			   VIRTIO_VRING_ALIGN);
+	vring->num = vq->vq_nentries;
+	vring->desc_iova = vq->vq_ring_mem;
+	vring->desc = (void *)(uintptr_t)desc_addr;
+	vring->driver = (void *)(uintptr_t)avail_addr;
+	vring->device = (void *)(uintptr_t)used_addr;
+	dev->packed_queues[queue_idx].avail_wrap_counter = true;
+	dev->packed_queues[queue_idx].used_wrap_counter = true;
+	dev->packed_queues[queue_idx].used_idx = 0;
+
+	for (i = 0; i < vring->num; i++)
+		vring->desc[i].flags = 0;
+}
+
+static void
+virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
+{
+	uint16_t queue_idx = vq->vq_queue_index;
+	uint64_t desc_addr, avail_addr, used_addr;
+
+	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
+	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+							 ring[vq->vq_nentries]),
+				   VIRTIO_VRING_ALIGN);
+
+	dev->vrings.split[queue_idx].num = vq->vq_nentries;
+	dev->vrings.split[queue_idx].desc_iova = vq->vq_ring_mem;
+	dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
+	dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
+	dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
+}
+
+static int
+virtio_user_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+	if (vtpci_with_packed_queue(hw))
+		virtio_user_setup_queue_packed(vq, dev);
+	else
+		virtio_user_setup_queue_split(vq, dev);
+
+	if (dev->notify_area)
+		vq->notify_addr = dev->notify_area[vq->vq_queue_index];
+
+	if (virtcrypto_cq_to_vq(hw->cvq) == vq)
+		dev->scvq = virtcrypto_cq_to_vq(hw->cvq);
+
+	return 0;
+}
+
+static void
+virtio_user_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+	/* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port makes
+	 * QEMU stop the corresponding ioeventfds and reset the status of
+	 * the device.
+	 * For modern devices, setting the queue desc, avail and used addresses
+	 * in the PCI bar to 0 does not trigger any further behavior in QEMU.
+	 *
+	 * Here we just care about what information to deliver to vhost-user
+	 * or vhost-kernel, so we just close the ioeventfd for now.
+	 */
+
+	RTE_SET_USED(hw);
+	RTE_SET_USED(vq);
+}
+
+static void
+virtio_user_notify_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	uint64_t notify_data = 1;
+
+	if (!dev->notify_area) {
+		if (write(dev->kickfds[vq->vq_queue_index], &notify_data,
+			  sizeof(notify_data)) < 0)
+			PMD_DRV_LOG(ERR, "failed to kick backend: %s",
+				    strerror(errno));
+		return;
+	} else if (!vtpci_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
+		rte_write16(vq->vq_queue_index, vq->notify_addr);
+		return;
+	}
+
+	if (vtpci_with_packed_queue(hw)) {
+		/* Bit[0:15]: vq queue index
+		 * Bit[16:30]: avail index
+		 * Bit[31]: avail wrap counter
+		 */
+		notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags &
+				VRING_PACKED_DESC_F_AVAIL)) << 31) |
+				((uint32_t)vq->vq_avail_idx << 16) |
+				vq->vq_queue_index;
+	} else {
+		/* Bit[0:15]: vq queue index
+		 * Bit[16:31]: avail index
+		 */
+		notify_data = ((uint32_t)vq->vq_avail_idx << 16) |
+				vq->vq_queue_index;
+	}
+	rte_write32(notify_data, vq->notify_addr);
+}
+
+const struct virtio_pci_ops crypto_virtio_user_ops = {
+	.read_dev_cfg	= virtio_user_read_dev_config,
+	.write_dev_cfg	= virtio_user_write_dev_config,
+	.reset		= virtio_user_reset,
+	.get_status	= virtio_user_get_status,
+	.set_status	= virtio_user_set_status,
+	.get_features	= virtio_user_get_features,
+	.set_features	= virtio_user_set_features,
+	.get_isr	= virtio_user_get_isr,
+	.set_config_irq	= virtio_user_set_config_irq,
+	.set_queue_irq	= virtio_user_set_queue_irq,
+	.get_queue_num	= virtio_user_get_queue_num,
+	.setup_queue	= virtio_user_setup_queue,
+	.del_queue	= virtio_user_del_queue,
+	.notify_queue	= virtio_user_notify_queue,
+};
+
+static const char * const valid_args[] = {
+#define VIRTIO_USER_ARG_QUEUES_NUM     "queues"
+	VIRTIO_USER_ARG_QUEUES_NUM,
+#define VIRTIO_USER_ARG_QUEUE_SIZE     "queue_size"
+	VIRTIO_USER_ARG_QUEUE_SIZE,
+#define VIRTIO_USER_ARG_PATH           "path"
+	VIRTIO_USER_ARG_PATH,
+#define VIRTIO_USER_ARG_SERVER_MODE    "server"
+	VIRTIO_USER_ARG_SERVER_MODE,
+	NULL
+};
+
+#define VIRTIO_USER_DEF_Q_NUM	1
+#define VIRTIO_USER_DEF_Q_SZ	256
+#define VIRTIO_USER_DEF_SERVER_MODE	0
+
+static int
+get_string_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	if (!value || !extra_args)
+		return -EINVAL;
+
+	*(char **)extra_args = strdup(value);
+
+	if (!*(char **)extra_args)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int
+get_integer_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	uint64_t integer = 0;
+	if (!value || !extra_args)
+		return -EINVAL;
+	errno = 0;
+	integer = strtoull(value, NULL, 0);
+	/* extra_args keeps its default value; it should be replaced
+	 * only in case of successful parsing of the 'value' arg.
+	 */
+	if (errno == 0)
+		*(uint64_t *)extra_args = integer;
+	return -errno;
+}
+
+static struct rte_cryptodev *
+virtio_user_cryptodev_alloc(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.private_data_size = sizeof(struct virtio_user_dev),
+	};
+	struct rte_cryptodev_data *data;
+	struct rte_cryptodev *cryptodev;
+	struct virtio_user_dev *dev;
+	struct virtio_crypto_hw *hw;
+
+	init_params.socket_id = vdev->device.numa_node;
+	init_params.private_data_size = sizeof(struct virtio_user_dev);
+	cryptodev = rte_cryptodev_pmd_create(vdev->device.name, &vdev->device, &init_params);
+	if (cryptodev == NULL) {
+		PMD_INIT_LOG(ERR, "failed to create cryptodev vdev");
+		return NULL;
+	}
+
+	data = cryptodev->data;
+	dev = data->dev_private;
+	hw = &dev->hw;
+
+	hw->dev_id = data->dev_id;
+	VTPCI_OPS(hw) = &crypto_virtio_user_ops;
+
+	return cryptodev;
+}
+
+static void
+virtio_user_cryptodev_free(struct rte_cryptodev *cryptodev)
+{
+	rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static int
+virtio_user_pmd_probe(struct rte_vdev_device *vdev)
+{
+	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
+	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
+	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
+	struct rte_cryptodev *cryptodev = NULL;
+	struct rte_kvargs *kvlist = NULL;
+	struct virtio_user_dev *dev;
+	char *path = NULL;
+	int ret = -1;
+
+	kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_args);
+
+	if (!kvlist) {
+		PMD_INIT_LOG(ERR, "error when parsing param");
+		goto end;
+	}
+
+	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
+		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
+					&get_string_arg, &path) < 0) {
+			PMD_INIT_LOG(ERR, "error to parse %s",
+					VIRTIO_USER_ARG_PATH);
+			goto end;
+		}
+	} else {
+		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
+				VIRTIO_USER_ARG_PATH);
+		goto end;
+	}
+
+	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
+		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
+					&get_integer_arg, &queues) < 0) {
+			PMD_INIT_LOG(ERR, "error to parse %s",
+					VIRTIO_USER_ARG_QUEUES_NUM);
+			goto end;
+		}
+	}
+
+	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
+		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
+					&get_integer_arg, &queue_size) < 0) {
+			PMD_INIT_LOG(ERR, "error to parse %s",
+					VIRTIO_USER_ARG_QUEUE_SIZE);
+			goto end;
+		}
+	}
+
+	cryptodev = virtio_user_cryptodev_alloc(vdev);
+	if (!cryptodev) {
+		PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
+		goto end;
+	}
+
+	dev = cryptodev->data->dev_private;
+	if (crypto_virtio_user_dev_init(dev, path, queues, queue_size,
+			server_mode) < 0) {
+		PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
+		virtio_user_cryptodev_free(cryptodev);
+		goto end;
+	}
+
+	if (crypto_virtio_dev_init(cryptodev, VIRTIO_USER_CRYPTO_PMD_GUEST_FEATURES,
+			NULL) < 0) {
+		PMD_INIT_LOG(ERR, "crypto_virtio_dev_init fails");
+		crypto_virtio_user_dev_uninit(dev);
+		virtio_user_cryptodev_free(cryptodev);
+		goto end;
+	}
+
+	rte_cryptodev_pmd_probing_finish(cryptodev);
+
+	ret = 0;
+end:
+	rte_kvargs_free(kvlist);
+	free(path);
+	return ret;
+}
+
+static int
+virtio_user_pmd_remove(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev *cryptodev;
+	const char *name;
+	int devid;
+
+	if (!vdev)
+		return -EINVAL;
+
+	name = rte_vdev_device_name(vdev);
+	PMD_DRV_LOG(INFO, "Removing %s", name);
+
+	devid = rte_cryptodev_get_dev_id(name);
+	if (devid < 0)
+		return -EINVAL;
+
+	rte_cryptodev_stop(devid);
+
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	if (rte_cryptodev_pmd_destroy(cryptodev) < 0) {
+		PMD_DRV_LOG(ERR, "Failed to remove %s", name);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr,
+		uint64_t iova, size_t len)
+{
+	struct rte_cryptodev *cryptodev;
+	struct virtio_user_dev *dev;
+	const char *name;
+
+	if (!vdev)
+		return -EINVAL;
+
+	name = rte_vdev_device_name(vdev);
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -EINVAL;
+
+	dev = cryptodev->data->dev_private;
+
+	if (dev->ops->dma_map)
+		return dev->ops->dma_map(dev, addr, iova, len);
+
+	return 0;
+}
+
+static int virtio_user_pmd_dma_unmap(struct rte_vdev_device *vdev, void *addr,
+		uint64_t iova, size_t len)
+{
+	struct rte_cryptodev *cryptodev;
+	struct virtio_user_dev *dev;
+	const char *name;
+
+	if (!vdev)
+		return -EINVAL;
+
+	name = rte_vdev_device_name(vdev);
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -EINVAL;
+
+	dev = cryptodev->data->dev_private;
+
+	if (dev->ops->dma_unmap)
+		return dev->ops->dma_unmap(dev, addr, iova, len);
+
+	return 0;
+}
+
+static struct rte_vdev_driver virtio_user_driver = {
+	.probe = virtio_user_pmd_probe,
+	.remove = virtio_user_pmd_remove,
+	.dma_map = virtio_user_pmd_dma_map,
+	.dma_unmap = virtio_user_pmd_dma_unmap,
+};
+
+static struct cryptodev_driver virtio_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(crypto_virtio_user, virtio_user_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
+	virtio_user_driver.driver,
+	cryptodev_virtio_driver_id);
+RTE_PMD_REGISTER_ALIAS(crypto_virtio_user, crypto_virtio);
+RTE_PMD_REGISTER_PARAM_STRING(crypto_virtio_user,
+	"path=<path> "
+	"queues=<int> "
+	"queue_size=<int>");
-- 
2.25.1


^ permalink raw reply	[relevance 1%]

* [PATCH v2] ring: add the second version of the RTS interface
  2025-01-05 15:13  5% ` [PATCH v2] " Huichao Cai
@ 2025-01-08  1:41  3%   ` Huichao Cai
  2025-01-14 15:04  0%     ` Thomas Monjalon
  0 siblings, 1 reply; 169+ results
From: Huichao Cai @ 2025-01-08  1:41 UTC (permalink / raw)
  To: thomas; +Cc: dev, honnappa.nagarahalli, konstantin.v.ananyev

Hi Thomas,
    This patch adds a field to the ABI structure. I have added the suppress_type
entry in the file libabigail.abignore, but "ci/github-robot: Build" still reported
an error. Could you please advise on how to fill in the suppress_type entry?
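
For reference, a suppression entry usually looks like the sketch below; the
type name and the member bound here are only illustrative assumptions, not
the exact ones needed for this patch:

    [suppress_type]
            name = rte_ring_rts_headtail
            has_data_member_inserted_between = {offset_of(head), end}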


^ permalink raw reply	[relevance 3%]

* Re: [PATCH v8 27/29] lib/net: replace packed attributes
  @ 2025-01-08 12:01  3%     ` David Marchand
  2025-01-09  2:49  0%       ` Andre Muezerie
  0 siblings, 1 reply; 169+ results
From: David Marchand @ 2025-01-08 12:01 UTC (permalink / raw)
  To: Andre Muezerie; +Cc: roretzla, dev, Thomas Monjalon, Robin Jarry

On Tue, Dec 31, 2024 at 7:40 PM Andre Muezerie
<andremue@linux.microsoft.com> wrote:
> diff --git a/lib/net/rte_ip6.h b/lib/net/rte_ip6.h
> index 992ab5ee1f..92558a124a 100644
> --- a/lib/net/rte_ip6.h
> +++ b/lib/net/rte_ip6.h
> @@ -358,7 +358,7 @@ enum rte_ipv6_mc_scope {
>         RTE_IPV6_MC_SCOPE_ORGLOCAL = 0x08,
>         /** Global multicast scope. */
>         RTE_IPV6_MC_SCOPE_GLOBAL = 0x0e,
> -} __rte_packed;
> +};
>
>  /**
>   * Extract the IPv6 multicast scope value as defined in RFC 4291, section 2.7.

Cc: Robin for info.

This change affects the storage size of a variable of this type (at
least with gcc).
I think it is ok from an ABI pov: there is one (inline) helper using
this type, and nothing else in DPDK takes an IPv6 multicast scope as
input.

However, it deserves a mention in the commitlog (maybe a separate
commit to highlight it?).
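
A quick way to see the size difference (an illustrative snippet, gcc-specific):

    #include <stdio.h>

    enum scope_packed { SP_GLOBAL = 0x0e } __attribute__((packed));
    enum scope_plain  { SL_GLOBAL = 0x0e };

    int main(void)
    {
        /* Typically prints "1 4" with gcc. */
        printf("%zu %zu\n", sizeof(enum scope_packed), sizeof(enum scope_plain));
        return 0;
    }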


-- 
David Marchand


^ permalink raw reply	[relevance 3%]

* [PATCH v10 27/30] lib/net: replace packed attributes
  @ 2025-01-09  2:46  1%   ` Andre Muezerie
  0 siblings, 0 replies; 169+ results
From: Andre Muezerie @ 2025-01-09  2:46 UTC (permalink / raw)
  To: roretzla
  Cc: aman.deep.singh, anatoly.burakov, bruce.richardson, byron.marohn,
	conor.walsh, cristian.dumitrescu, david.hunt, dev, dsosnowski,
	gakhil, jerinj, jingjing.wu, kirill.rybalchenko,
	konstantin.v.ananyev, matan, mb, orika, radu.nicolau,
	ruifeng.wang, sameh.gobriel, sivaprasad.tummala, skori, stephen,
	suanmingm, vattunuru, viacheslavo, vladimir.medvedkin,
	yipeng1.wang, Andre Muezerie

MSVC struct packing is not compatible with GCC. Replace the macro
__rte_packed with __rte_packed_begin, which pushes the existing pack
value and sets packing to 1 byte, and the macro __rte_packed_end,
which restores the pack value that was in effect prior to the push.

Macro __rte_packed_end deliberately triggers an MSVC compiler warning
if no packing has been pushed, allowing easy identification of
locations where __rte_packed_begin is missing.
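
For illustration, the macro pair could expand roughly as below; this is a
sketch of the intent described above, not necessarily the exact definition
used in rte_common.h:

    #ifdef RTE_TOOLCHAIN_MSVC
    #define __rte_packed_begin __pragma(pack(push, 1))
    #define __rte_packed_end   __pragma(pack(pop))
    #else
    #define __rte_packed_begin
    #define __rte_packed_end   __attribute__((__packed__))
    #endif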

This change affects the storage size of a variable of enum
rte_ipv6_mc_scope (at least with gcc). It should be OK from an ABI POV
though: there is one (inline) helper using this type, and nothing else
in DPDK takes an IPv6 multicast scope as input.

Signed-off-by: Andre Muezerie <andremue@linux.microsoft.com>
---
 lib/net/rte_arp.h      |  8 ++++----
 lib/net/rte_dtls.h     |  4 ++--
 lib/net/rte_esp.h      |  8 ++++----
 lib/net/rte_geneve.h   |  4 ++--
 lib/net/rte_gre.h      | 16 ++++++++--------
 lib/net/rte_gtp.h      | 20 ++++++++++----------
 lib/net/rte_ib.h       |  4 ++--
 lib/net/rte_icmp.h     | 12 ++++++------
 lib/net/rte_ip4.h      |  4 ++--
 lib/net/rte_ip6.h      | 14 +++++++-------
 lib/net/rte_l2tpv2.h   | 16 ++++++++--------
 lib/net/rte_macsec.h   |  8 ++++----
 lib/net/rte_mpls.h     |  4 ++--
 lib/net/rte_pdcp_hdr.h | 16 ++++++++--------
 lib/net/rte_ppp.h      |  4 ++--
 lib/net/rte_sctp.h     |  4 ++--
 lib/net/rte_tcp.h      |  4 ++--
 lib/net/rte_tls.h      |  4 ++--
 lib/net/rte_udp.h      |  4 ++--
 lib/net/rte_vxlan.h    | 28 ++++++++++++++--------------
 20 files changed, 93 insertions(+), 93 deletions(-)

diff --git a/lib/net/rte_arp.h b/lib/net/rte_arp.h
index 668cea1704..e885a71292 100644
--- a/lib/net/rte_arp.h
+++ b/lib/net/rte_arp.h
@@ -21,17 +21,17 @@ extern "C" {
 /**
  * ARP header IPv4 payload.
  */
-struct __rte_aligned(2) rte_arp_ipv4 {
+struct __rte_aligned(2) __rte_packed_begin rte_arp_ipv4 {
 	struct rte_ether_addr arp_sha;  /**< sender hardware address */
 	rte_be32_t            arp_sip;  /**< sender IP address */
 	struct rte_ether_addr arp_tha;  /**< target hardware address */
 	rte_be32_t            arp_tip;  /**< target IP address */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * ARP header.
  */
-struct __rte_aligned(2) rte_arp_hdr {
+struct __rte_aligned(2) __rte_packed_begin rte_arp_hdr {
 	rte_be16_t arp_hardware; /**< format of hardware address */
 #define RTE_ARP_HRD_ETHER     1  /**< ARP Ethernet address format */
 
@@ -47,7 +47,7 @@ struct __rte_aligned(2) rte_arp_hdr {
 #define	RTE_ARP_OP_INVREPLY   9  /**< response identifying peer */
 
 	struct rte_arp_ipv4 arp_data;
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Make a RARP packet based on MAC addr.
diff --git a/lib/net/rte_dtls.h b/lib/net/rte_dtls.h
index 246cd8a72d..1dd95ce899 100644
--- a/lib/net/rte_dtls.h
+++ b/lib/net/rte_dtls.h
@@ -30,7 +30,7 @@
  * DTLS Header
  */
 __extension__
-struct rte_dtls_hdr {
+struct __rte_packed_begin rte_dtls_hdr {
 	/** Content type of DTLS packet. Defined as RTE_DTLS_TYPE_*. */
 	uint8_t type;
 	/** DTLS Version defined as RTE_DTLS_VERSION*. */
@@ -48,6 +48,6 @@ struct rte_dtls_hdr {
 #endif
 	/** The length (in bytes) of the following DTLS packet. */
 	rte_be16_t length;
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_DTLS_H */
diff --git a/lib/net/rte_esp.h b/lib/net/rte_esp.h
index 745a9847fe..2a0002f4d9 100644
--- a/lib/net/rte_esp.h
+++ b/lib/net/rte_esp.h
@@ -16,17 +16,17 @@
 /**
  * ESP Header
  */
-struct rte_esp_hdr {
+struct __rte_packed_begin rte_esp_hdr {
 	rte_be32_t spi;  /**< Security Parameters Index */
 	rte_be32_t seq;  /**< packet sequence number */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * ESP Trailer
  */
-struct rte_esp_tail {
+struct __rte_packed_begin rte_esp_tail {
 	uint8_t pad_len;     /**< number of pad bytes (0-255) */
 	uint8_t next_proto;  /**< IPv4 or IPv6 or next layer header */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_ESP_H_ */
diff --git a/lib/net/rte_geneve.h b/lib/net/rte_geneve.h
index eb2c85f1e9..f962c587ee 100644
--- a/lib/net/rte_geneve.h
+++ b/lib/net/rte_geneve.h
@@ -34,7 +34,7 @@
  * More-bits (optional) variable length options.
  */
 __extension__
-struct rte_geneve_hdr {
+struct __rte_packed_begin rte_geneve_hdr {
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	uint8_t ver:2;		/**< Version. */
 	uint8_t opt_len:6;	/**< Options length. */
@@ -52,7 +52,7 @@ struct rte_geneve_hdr {
 	uint8_t vni[3];		/**< Virtual network identifier. */
 	uint8_t reserved2;	/**< Reserved. */
 	uint32_t opts[];	/**< Variable length options. */
-} __rte_packed;
+} __rte_packed_end;
 
 /* GENEVE ETH next protocol types */
 #define RTE_GENEVE_TYPE_ETH	0x6558 /**< Ethernet Protocol. */
diff --git a/lib/net/rte_gre.h b/lib/net/rte_gre.h
index 1483e1b42d..768c4ce7b5 100644
--- a/lib/net/rte_gre.h
+++ b/lib/net/rte_gre.h
@@ -23,7 +23,7 @@
  * GRE Header
  */
 __extension__
-struct rte_gre_hdr {
+struct __rte_packed_begin rte_gre_hdr {
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint16_t res2:4; /**< Reserved */
 	uint16_t s:1;    /**< Sequence Number Present bit */
@@ -42,28 +42,28 @@ struct rte_gre_hdr {
 	uint16_t ver:3;  /**< Version Number */
 #endif
 	rte_be16_t proto;  /**< Protocol Type */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Optional field checksum in GRE header
  */
-struct rte_gre_hdr_opt_checksum_rsvd {
+struct __rte_packed_begin rte_gre_hdr_opt_checksum_rsvd {
 	rte_be16_t checksum;
 	rte_be16_t reserved1;
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Optional field key in GRE header
  */
-struct rte_gre_hdr_opt_key {
+struct __rte_packed_begin rte_gre_hdr_opt_key {
 	rte_be32_t key;
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Optional field sequence in GRE header
  */
-struct rte_gre_hdr_opt_sequence {
+struct __rte_packed_begin rte_gre_hdr_opt_sequence {
 	rte_be32_t sequence;
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_GRE_H_ */
diff --git a/lib/net/rte_gtp.h b/lib/net/rte_gtp.h
index ab06e23a6e..0332d35c16 100644
--- a/lib/net/rte_gtp.h
+++ b/lib/net/rte_gtp.h
@@ -24,7 +24,7 @@
  * No optional fields and next extension header.
  */
 __extension__
-struct rte_gtp_hdr {
+struct __rte_packed_begin rte_gtp_hdr {
 	union {
 		uint8_t gtp_hdr_info; /**< GTP header info */
 		struct {
@@ -48,21 +48,21 @@ struct rte_gtp_hdr {
 	uint8_t msg_type;     /**< GTP message type */
 	rte_be16_t plen;      /**< Total payload length */
 	rte_be32_t teid;      /**< Tunnel endpoint ID */
-} __rte_packed;
+} __rte_packed_end;
 
 /* Optional word of GTP header, present if any of E, S, PN is set. */
-struct rte_gtp_hdr_ext_word {
+struct __rte_packed_begin rte_gtp_hdr_ext_word {
 	rte_be16_t sqn;	      /**< Sequence Number. */
 	uint8_t npdu;	      /**< N-PDU number. */
 	uint8_t next_ext;     /**< Next Extension Header Type. */
-}  __rte_packed;
+}  __rte_packed_end;
 
 /**
  * Optional extension for GTP with next_ext set to 0x85
  * defined based on RFC 38415-g30.
  */
 __extension__
-struct rte_gtp_psc_generic_hdr {
+struct __rte_packed_begin rte_gtp_psc_generic_hdr {
 	uint8_t ext_hdr_len;	/**< PDU ext hdr len in multiples of 4 bytes */
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	uint8_t type:4;		/**< PDU type */
@@ -78,14 +78,14 @@ struct rte_gtp_psc_generic_hdr {
 	uint8_t spare:2;	/**< type specific spare bits */
 #endif
 	uint8_t data[0];	/**< variable length data fields */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Optional extension for GTP with next_ext set to 0x85
  * type0 defined based on RFC 38415-g30
  */
 __extension__
-struct rte_gtp_psc_type0_hdr {
+struct __rte_packed_begin rte_gtp_psc_type0_hdr {
 	uint8_t ext_hdr_len;	/**< PDU ext hdr len in multiples of 4 bytes */
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	uint8_t type:4;		/**< PDU type */
@@ -105,14 +105,14 @@ struct rte_gtp_psc_type0_hdr {
 	uint8_t ppp:1;		/**< Paging policy presence */
 #endif
 	uint8_t data[0];	/**< variable length data fields */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Optional extension for GTP with next_ext set to 0x85
  * type1 defined based on RFC 38415-g30
  */
 __extension__
-struct rte_gtp_psc_type1_hdr {
+struct __rte_packed_begin rte_gtp_psc_type1_hdr {
 	uint8_t ext_hdr_len;	/**< PDU ext hdr len in multiples of 4 bytes */
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	uint8_t type:4;		/**< PDU type */
@@ -134,7 +134,7 @@ struct rte_gtp_psc_type1_hdr {
 	uint8_t n_delay_ind:1;	/**< N3/N9 delay result presence */
 #endif
 	uint8_t data[0];	/**< variable length data fields */
-} __rte_packed;
+} __rte_packed_end;
 
 /** GTP header length */
 #define RTE_ETHER_GTP_HLEN \
diff --git a/lib/net/rte_ib.h b/lib/net/rte_ib.h
index a551f3753f..f1b455cea0 100644
--- a/lib/net/rte_ib.h
+++ b/lib/net/rte_ib.h
@@ -22,7 +22,7 @@
  * IB Specification Vol 1-Release-1.4.
  */
 __extension__
-struct rte_ib_bth {
+struct __rte_packed_begin rte_ib_bth {
 	uint8_t	opcode;		/**< Opcode. */
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint8_t	tver:4;		/**< Transport Header Version. */
@@ -54,7 +54,7 @@ struct rte_ib_bth {
 	uint8_t	rsvd1:7;	/**< Reserved. */
 #endif
 	uint8_t	psn[3];		/**< Packet Sequence Number */
-} __rte_packed;
+} __rte_packed_end;
 
 /** RoCEv2 default port. */
 #define RTE_ROCEV2_DEFAULT_PORT 4791
diff --git a/lib/net/rte_icmp.h b/lib/net/rte_icmp.h
index e69d68ab6e..cca73b3733 100644
--- a/lib/net/rte_icmp.h
+++ b/lib/net/rte_icmp.h
@@ -21,33 +21,33 @@
 /**
  * ICMP base header
  */
-struct rte_icmp_base_hdr {
+struct __rte_packed_begin rte_icmp_base_hdr {
 	uint8_t type;
 	uint8_t code;
 	rte_be16_t checksum;
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * ICMP echo header
  */
-struct rte_icmp_echo_hdr {
+struct __rte_packed_begin rte_icmp_echo_hdr {
 	struct rte_icmp_base_hdr base;
 	rte_be16_t identifier;
 	rte_be16_t sequence;
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * ICMP Header
  *
  * @see rte_icmp_echo_hdr which is similar.
  */
-struct rte_icmp_hdr {
+struct __rte_packed_begin rte_icmp_hdr {
 	uint8_t  icmp_type;     /* ICMP packet type. */
 	uint8_t  icmp_code;     /* ICMP packet code. */
 	rte_be16_t icmp_cksum;  /* ICMP packet checksum. */
 	rte_be16_t icmp_ident;  /* ICMP packet identifier. */
 	rte_be16_t icmp_seq_nb; /* ICMP packet sequence number. */
-} __rte_packed;
+} __rte_packed_end;
 
 /* ICMP packet types */
 #define RTE_ICMP_TYPE_ECHO_REPLY 0
diff --git a/lib/net/rte_ip4.h b/lib/net/rte_ip4.h
index f9b8333332..d4b38c513c 100644
--- a/lib/net/rte_ip4.h
+++ b/lib/net/rte_ip4.h
@@ -39,7 +39,7 @@ extern "C" {
 /**
  * IPv4 Header
  */
-struct __rte_aligned(2) rte_ipv4_hdr {
+struct __rte_aligned(2) __rte_packed_begin rte_ipv4_hdr {
 	__extension__
 	union {
 		uint8_t version_ihl;    /**< version and header length */
@@ -62,7 +62,7 @@ struct __rte_aligned(2) rte_ipv4_hdr {
 	rte_be16_t hdr_checksum;	/**< header checksum */
 	rte_be32_t src_addr;		/**< source address */
 	rte_be32_t dst_addr;		/**< destination address */
-} __rte_packed;
+} __rte_packed_end;
 
 /** Create IPv4 address */
 #define RTE_IPV4(a, b, c, d) ((uint32_t)(((a) & 0xff) << 24) | \
diff --git a/lib/net/rte_ip6.h b/lib/net/rte_ip6.h
index 992ab5ee1f..92558a124a 100644
--- a/lib/net/rte_ip6.h
+++ b/lib/net/rte_ip6.h
@@ -358,7 +358,7 @@ enum rte_ipv6_mc_scope {
 	RTE_IPV6_MC_SCOPE_ORGLOCAL = 0x08,
 	/** Global multicast scope. */
 	RTE_IPV6_MC_SCOPE_GLOBAL = 0x0e,
-} __rte_packed;
+};
 
 /**
  * Extract the IPv6 multicast scope value as defined in RFC 4291, section 2.7.
@@ -461,7 +461,7 @@ rte_ether_mcast_from_ipv6(struct rte_ether_addr *mac, const struct rte_ipv6_addr
 /**
  * IPv6 Header
  */
-struct __rte_aligned(2) rte_ipv6_hdr {
+struct __rte_aligned(2) __rte_packed_begin rte_ipv6_hdr {
 	union {
 		rte_be32_t vtc_flow;        /**< IP version, traffic class & flow label. */
 		__extension__
@@ -484,7 +484,7 @@ struct __rte_aligned(2) rte_ipv6_hdr {
 	uint8_t  hop_limits;	/**< Hop limits. */
 	struct rte_ipv6_addr src_addr;	/**< IP address of source host. */
 	struct rte_ipv6_addr dst_addr;	/**< IP address of destination host(s). */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Check that the IPv6 header version field is valid according to RFC 8200 section 3.
@@ -508,7 +508,7 @@ static inline int rte_ipv6_check_version(const struct rte_ipv6_hdr *ip)
 /**
  * IPv6 Routing Extension Header
  */
-struct __rte_aligned(2) rte_ipv6_routing_ext {
+struct __rte_aligned(2) __rte_packed_begin rte_ipv6_routing_ext {
 	uint8_t next_hdr;			/**< Protocol, next header. */
 	uint8_t hdr_len;			/**< Header length. */
 	uint8_t type;				/**< Extension header type. */
@@ -523,7 +523,7 @@ struct __rte_aligned(2) rte_ipv6_routing_ext {
 		};
 	};
 	/* Next are 128-bit IPv6 address fields to describe segments. */
-} __rte_packed;
+} __rte_packed_end;
 
 /* IPv6 vtc_flow: IPv / TC / flow_label */
 #define RTE_IPV6_HDR_FL_SHIFT 0
@@ -752,12 +752,12 @@ rte_ipv6_udptcp_cksum_mbuf_verify(const struct rte_mbuf *m,
 #define RTE_IPV6_SET_FRAG_DATA(fo, mf)	\
 	(((fo) & RTE_IPV6_EHDR_FO_MASK) | ((mf) & RTE_IPV6_EHDR_MF_MASK))
 
-struct __rte_aligned(2) rte_ipv6_fragment_ext {
+struct __rte_aligned(2) __rte_packed_begin rte_ipv6_fragment_ext {
 	uint8_t next_header;	/**< Next header type */
 	uint8_t reserved;	/**< Reserved */
 	rte_be16_t frag_data;	/**< All fragmentation data */
 	rte_be32_t id;		/**< Packet ID */
-} __rte_packed;
+} __rte_packed_end;
 
 /* IPv6 fragment extension header size */
 #define RTE_IPV6_FRAG_HDR_SIZE	sizeof(struct rte_ipv6_fragment_ext)
diff --git a/lib/net/rte_l2tpv2.h b/lib/net/rte_l2tpv2.h
index ac16657856..728dc01506 100644
--- a/lib/net/rte_l2tpv2.h
+++ b/lib/net/rte_l2tpv2.h
@@ -125,7 +125,7 @@ struct rte_l2tpv2_common_hdr {
  * L2TPv2 message Header contains all options(length, ns, nr,
  * offset size, offset padding).
  */
-struct rte_l2tpv2_msg_with_all_options {
+struct __rte_packed_begin rte_l2tpv2_msg_with_all_options {
 	rte_be16_t length;		/**< length(16) */
 	rte_be16_t tunnel_id;		/**< tunnel ID(16) */
 	rte_be16_t session_id;		/**< session ID(16) */
@@ -133,20 +133,20 @@ struct rte_l2tpv2_msg_with_all_options {
 	rte_be16_t nr;			/**< Nr(16) */
 	rte_be16_t offset_size;		/**< offset size(16) */
 	uint8_t   *offset_padding;	/**< offset padding(variable length) */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * L2TPv2 message Header contains all options except length(ns, nr,
  * offset size, offset padding).
  */
-struct rte_l2tpv2_msg_without_length {
+struct __rte_packed_begin rte_l2tpv2_msg_without_length {
 	rte_be16_t tunnel_id;		/**< tunnel ID(16) */
 	rte_be16_t session_id;		/**< session ID(16) */
 	rte_be16_t ns;			/**< Ns(16) */
 	rte_be16_t nr;			/**< Nr(16) */
 	rte_be16_t offset_size;		/**< offset size(16) */
 	uint8_t   *offset_padding;	/**< offset padding(variable length) */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * L2TPv2 message Header contains all options except ns_nr(length,
@@ -176,12 +176,12 @@ struct rte_l2tpv2_msg_without_offset {
 /**
  * L2TPv2 message Header contains options offset size and offset padding.
  */
-struct rte_l2tpv2_msg_with_offset {
+struct __rte_packed_begin rte_l2tpv2_msg_with_offset {
 	rte_be16_t tunnel_id;		/**< tunnel ID(16) */
 	rte_be16_t session_id;		/**< session ID(16) */
 	rte_be16_t offset_size;		/**< offset size(16) */
 	uint8_t   *offset_padding;	/**< offset padding(variable length) */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * L2TPv2 message Header contains options ns and nr.
@@ -213,7 +213,7 @@ struct rte_l2tpv2_msg_without_all_options {
 /**
  * L2TPv2 Combined Message Header Format: Common Header + Options
  */
-struct rte_l2tpv2_combined_msg_hdr {
+struct __rte_packed_begin rte_l2tpv2_combined_msg_hdr {
 	struct rte_l2tpv2_common_hdr common; /**< common header */
 	union {
 		/** header with all options */
@@ -233,6 +233,6 @@ struct rte_l2tpv2_combined_msg_hdr {
 		/** header without all options */
 		struct rte_l2tpv2_msg_without_all_options type7;
 	};
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* _RTE_L2TPV2_H_ */
diff --git a/lib/net/rte_macsec.h b/lib/net/rte_macsec.h
index beeeb8effe..c694c37b4b 100644
--- a/lib/net/rte_macsec.h
+++ b/lib/net/rte_macsec.h
@@ -25,7 +25,7 @@
  * MACsec Header (SecTAG)
  */
 __extension__
-struct rte_macsec_hdr {
+struct __rte_packed_begin rte_macsec_hdr {
 	/**
 	 * Tag control information and Association number of secure channel.
 	 * Various bits of TCI and AN are masked using RTE_MACSEC_TCI_* and RTE_MACSEC_AN_MASK.
@@ -39,7 +39,7 @@ struct rte_macsec_hdr {
 	uint8_t short_length:6; /**< Short Length. */
 #endif
 	rte_be32_t packet_number; /**< Packet number to support replay protection. */
-} __rte_packed;
+} __rte_packed_end;
 
 /** SCI length in MACsec header if present. */
 #define RTE_MACSEC_SCI_LEN 8
@@ -48,8 +48,8 @@ struct rte_macsec_hdr {
  * MACsec SCI header (8 bytes) after the MACsec header
  * which is present if SC bit is set in tci_an.
  */
-struct rte_macsec_sci_hdr {
+struct __rte_packed_begin rte_macsec_sci_hdr {
 	uint8_t sci[RTE_MACSEC_SCI_LEN]; /**< Optional secure channel ID. */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_MACSEC_H */
diff --git a/lib/net/rte_mpls.h b/lib/net/rte_mpls.h
index 35a356efd3..53614a0b88 100644
--- a/lib/net/rte_mpls.h
+++ b/lib/net/rte_mpls.h
@@ -18,7 +18,7 @@
  * MPLS header.
  */
 __extension__
-struct rte_mpls_hdr {
+struct __rte_packed_begin rte_mpls_hdr {
 	rte_be16_t tag_msb; /**< Label(msb). */
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	uint8_t tag_lsb:4;  /**< Label(lsb). */
@@ -30,6 +30,6 @@ struct rte_mpls_hdr {
 	uint8_t tag_lsb:4;  /**< label(lsb) */
 #endif
 	uint8_t  ttl;       /**< Time to live. */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_MPLS_H_ */
diff --git a/lib/net/rte_pdcp_hdr.h b/lib/net/rte_pdcp_hdr.h
index c22b66bf93..2e8da1e1d3 100644
--- a/lib/net/rte_pdcp_hdr.h
+++ b/lib/net/rte_pdcp_hdr.h
@@ -56,7 +56,7 @@ enum rte_pdcp_pdu_type {
  * 6.2.2.1 Data PDU for SRBs
  */
 __extension__
-struct rte_pdcp_cp_data_pdu_sn_12_hdr {
+struct __rte_packed_begin rte_pdcp_cp_data_pdu_sn_12_hdr {
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint8_t sn_11_8 : 4;	/**< Sequence number bits 8-11 */
 	uint8_t r : 4;		/**< Reserved */
@@ -65,13 +65,13 @@ struct rte_pdcp_cp_data_pdu_sn_12_hdr {
 	uint8_t sn_11_8 : 4;	/**< Sequence number bits 8-11 */
 #endif
 	uint8_t sn_7_0;		/**< Sequence number bits 0-7 */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * 6.2.2.2 Data PDU for DRBs and MRBs with 12 bits PDCP SN
  */
 __extension__
-struct rte_pdcp_up_data_pdu_sn_12_hdr {
+struct __rte_packed_begin rte_pdcp_up_data_pdu_sn_12_hdr {
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint8_t sn_11_8 : 4;	/**< Sequence number bits 8-11 */
 	uint8_t r : 3;		/**< Reserved */
@@ -82,13 +82,13 @@ struct rte_pdcp_up_data_pdu_sn_12_hdr {
 	uint8_t sn_11_8 : 4;	/**< Sequence number bits 8-11 */
 #endif
 	uint8_t sn_7_0;		/**< Sequence number bits 0-7 */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * 6.2.2.3 Data PDU for DRBs and MRBs with 18 bits PDCP SN
  */
 __extension__
-struct rte_pdcp_up_data_pdu_sn_18_hdr {
+struct __rte_packed_begin rte_pdcp_up_data_pdu_sn_18_hdr {
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint8_t sn_17_16 : 2;	/**< Sequence number bits 16-17 */
 	uint8_t r : 5;		/**< Reserved */
@@ -100,13 +100,13 @@ struct rte_pdcp_up_data_pdu_sn_18_hdr {
 #endif
 	uint8_t sn_15_8;	/**< Sequence number bits 8-15 */
 	uint8_t sn_7_0;		/**< Sequence number bits 0-7 */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * 6.2.3.1 Control PDU for PDCP status report
  */
 __extension__
-struct rte_pdcp_up_ctrl_pdu_hdr {
+struct __rte_packed_begin rte_pdcp_up_ctrl_pdu_hdr {
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint8_t r : 4;		/**< Reserved */
 	uint8_t pdu_type : 3;	/**< Control PDU type */
@@ -134,6 +134,6 @@ struct rte_pdcp_up_ctrl_pdu_hdr {
 	 * in the Bitmap is 1.
 	 */
 	uint8_t bitmap[];
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_PDCP_HDR_H */
diff --git a/lib/net/rte_ppp.h b/lib/net/rte_ppp.h
index 63c72a9392..02bfb03c03 100644
--- a/lib/net/rte_ppp.h
+++ b/lib/net/rte_ppp.h
@@ -17,10 +17,10 @@
 /**
  * PPP Header
  */
-struct rte_ppp_hdr {
+struct __rte_packed_begin rte_ppp_hdr {
 	uint8_t addr; /**< PPP address(8) */
 	uint8_t ctrl; /**< PPP control(8) */
 	rte_be16_t proto_id; /**< PPP protocol identifier(16) */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* _RTE_PPP_H_ */
diff --git a/lib/net/rte_sctp.h b/lib/net/rte_sctp.h
index e757c57db3..73051b94fd 100644
--- a/lib/net/rte_sctp.h
+++ b/lib/net/rte_sctp.h
@@ -21,11 +21,11 @@
 /**
  * SCTP Header
  */
-struct rte_sctp_hdr {
+struct __rte_packed_begin rte_sctp_hdr {
 	rte_be16_t src_port; /**< Source port. */
 	rte_be16_t dst_port; /**< Destin port. */
 	rte_be32_t tag;      /**< Validation tag. */
 	rte_be32_t cksum;    /**< Checksum. */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_SCTP_H_ */
diff --git a/lib/net/rte_tcp.h b/lib/net/rte_tcp.h
index 1bcacbf038..fb0eb308f5 100644
--- a/lib/net/rte_tcp.h
+++ b/lib/net/rte_tcp.h
@@ -21,7 +21,7 @@
 /**
  * TCP Header
  */
-struct rte_tcp_hdr {
+struct __rte_packed_begin rte_tcp_hdr {
 	rte_be16_t src_port; /**< TCP source port. */
 	rte_be16_t dst_port; /**< TCP destination port. */
 	rte_be32_t sent_seq; /**< TX data sequence number. */
@@ -31,7 +31,7 @@ struct rte_tcp_hdr {
 	rte_be16_t rx_win;   /**< RX flow control window. */
 	rte_be16_t cksum;    /**< TCP checksum. */
 	rte_be16_t tcp_urp;  /**< TCP urgent pointer, if any. */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * TCP Flags
diff --git a/lib/net/rte_tls.h b/lib/net/rte_tls.h
index 595567e3e9..f27db3acb1 100644
--- a/lib/net/rte_tls.h
+++ b/lib/net/rte_tls.h
@@ -28,13 +28,13 @@
  * TLS Header
  */
 __extension__
-struct rte_tls_hdr {
+struct __rte_packed_begin rte_tls_hdr {
 	/** Content type of TLS packet. Defined as RTE_TLS_TYPE_*. */
 	uint8_t type;
 	/** TLS Version defined as RTE_TLS_VERSION*. */
 	rte_be16_t version;
 	/** The length (in bytes) of the following TLS packet. */
 	rte_be16_t length;
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_TLS_H */
diff --git a/lib/net/rte_udp.h b/lib/net/rte_udp.h
index c01dad9c9b..94f5304e6d 100644
--- a/lib/net/rte_udp.h
+++ b/lib/net/rte_udp.h
@@ -21,11 +21,11 @@
 /**
  * UDP Header
  */
-struct rte_udp_hdr {
+struct __rte_packed_begin rte_udp_hdr {
 	rte_be16_t src_port;    /**< UDP source port. */
 	rte_be16_t dst_port;    /**< UDP destination port. */
 	rte_be16_t dgram_len;   /**< UDP datagram length */
 	rte_be16_t dgram_cksum; /**< UDP datagram checksum */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_UDP_H_ */
diff --git a/lib/net/rte_vxlan.h b/lib/net/rte_vxlan.h
index bd1c89835e..f59829b182 100644
--- a/lib/net/rte_vxlan.h
+++ b/lib/net/rte_vxlan.h
@@ -27,13 +27,13 @@
  * Reserved fields (24 bits and 8 bits)
  */
 __extension__ /* no named member in struct */
-struct rte_vxlan_hdr {
+struct __rte_packed_begin rte_vxlan_hdr {
 	union {
 		rte_be32_t vx_flags; /**< flags (8 bits) + extensions (24 bits). */
-		struct {
+		struct __rte_packed_begin {
 			union {
 				uint8_t flags; /**< Default is I bit, others are extensions. */
-				struct {
+				struct __rte_packed_begin {
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 					uint8_t flag_g:1,     /**< GBP bit. */
 						flag_rsvd:1,  /*   Reserved. */
@@ -51,11 +51,11 @@ struct rte_vxlan_hdr {
 						flag_rsvd:1,
 						flag_g:1;
 #endif
-				} __rte_packed;
+				} __rte_packed_end;
 			}; /* end of 1st byte */
 			union {
 				uint8_t rsvd0[3]; /* Reserved for extensions. */
-				struct {
+				struct __rte_packed_begin {
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 					uint8_t rsvd0_gbp1:1, /*   Reserved. */
 						flag_d:1,     /**< GBP Don't Learn bit. */
@@ -71,7 +71,7 @@ struct rte_vxlan_hdr {
 #endif
 					union {
 						uint16_t policy_id; /**< GBP Identifier. */
-						struct {
+						struct __rte_packed_begin {
 							uint8_t rsvd0_gpe; /* Reserved. */
 							uint8_t proto; /**< GPE Next protocol. */
 								/* 0x01 : IPv4
@@ -79,23 +79,23 @@ struct rte_vxlan_hdr {
 								 * 0x03 : Ethernet
 								 * 0x04 : Network Service Header
 								 */
-						} __rte_packed;
+						} __rte_packed_end;
 					};
-				} __rte_packed;
+				} __rte_packed_end;
 			};
-		} __rte_packed;
+		} __rte_packed_end;
 	}; /* end of 1st 32-bit word */
 	union {
 		rte_be32_t vx_vni; /**< VNI (24 bits) + reserved (8 bits). */
-		struct {
+		struct __rte_packed_begin {
 			uint8_t    vni[3];   /**< VXLAN Identifier. */
 			union {
 				uint8_t    rsvd1;        /**< Reserved. */
 				uint8_t    last_rsvd;    /**< Reserved. */
 			};
-		} __rte_packed;
+		} __rte_packed_end;
 	}; /* end of 2nd 32-bit word */
-} __rte_packed;
+} __rte_packed_end;
 
 /** VXLAN tunnel header length. */
 #define RTE_ETHER_VXLAN_HLEN \
@@ -111,7 +111,7 @@ struct rte_vxlan_hdr {
  * Identifier and Reserved fields (16 bits and 8 bits).
  */
 __extension__ /* no named member in struct */
-struct rte_vxlan_gpe_hdr {
+struct __rte_packed_begin rte_vxlan_gpe_hdr {
 	union {
 		struct {
 			uint8_t vx_flags;    /**< flag (8). */
@@ -127,7 +127,7 @@ struct rte_vxlan_gpe_hdr {
 			uint8_t rsvd1;    /**< Reserved. */
 		};
 	};
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * @deprecated
-- 
2.47.0.vfs.0.3


^ permalink raw reply	[relevance 1%]

* Re: [PATCH v8 27/29] lib/net: replace packed attributes
  2025-01-08 12:01  3%     ` David Marchand
@ 2025-01-09  2:49  0%       ` Andre Muezerie
  0 siblings, 0 replies; 169+ results
From: Andre Muezerie @ 2025-01-09  2:49 UTC (permalink / raw)
  To: David Marchand; +Cc: roretzla, dev, Thomas Monjalon, Robin Jarry

On Wed, Jan 08, 2025 at 01:01:23PM +0100, David Marchand wrote:
> On Tue, Dec 31, 2024 at 7:40 PM Andre Muezerie
> <andremue@linux.microsoft.com> wrote:
> > diff --git a/lib/net/rte_ip6.h b/lib/net/rte_ip6.h
> > index 992ab5ee1f..92558a124a 100644
> > --- a/lib/net/rte_ip6.h
> > +++ b/lib/net/rte_ip6.h
> > @@ -358,7 +358,7 @@ enum rte_ipv6_mc_scope {
> >         RTE_IPV6_MC_SCOPE_ORGLOCAL = 0x08,
> >         /** Global multicast scope. */
> >         RTE_IPV6_MC_SCOPE_GLOBAL = 0x0e,
> > -} __rte_packed;
> > +};
> >
> >  /**
> >   * Extract the IPv6 multicast scope value as defined in RFC 4291, section 2.7.
> 
> Cc: Robin for info.
> 
> This change affects the storage size of a variable of this type (at
> least with gcc).
> I think it is ok from an ABI pov: there is one (inline) helper using
> this type, and nothing else in DPDK takes an IPv6 multicast scope as
> input.
> 
> However, it deserves a mention in the commitlog (maybe a separate
> commit to highlight it?).
> 
> 
> -- 
> David Marchand

Makes sense. I added a note about that to the commit message for that patch in the v10 series.
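
For context, the storage-size effect David describes can be reproduced
with a minimal standalone program (illustrative only, not DPDK code;
assumes GCC/Clang, where the packed attribute shrinks an enum whose
enumerators fit in one byte):

#include <stdio.h>

/* Same enumerator range as rte_ipv6_mc_scope: with the packed
 * attribute the enum is stored in 1 byte, without it the ABI
 * default (typically 4 bytes) applies. */
enum scope_packed {
	SCOPE_PACKED_GLOBAL = 0x0e,
} __attribute__((packed));

enum scope_plain {
	SCOPE_PLAIN_GLOBAL = 0x0e,
};

int main(void)
{
	printf("packed: %zu, plain: %zu\n",
	       sizeof(enum scope_packed), sizeof(enum scope_plain));
	return 0; /* typically prints "packed: 1, plain: 4" with gcc */
}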

Thanks,
Andre Muezerie

^ permalink raw reply	[relevance 0%]

* RE: [PATCH 1/2] lib/ipsec: compile ipsec on Windows
  @ 2025-01-09 15:31  3%   ` Konstantin Ananyev
  0 siblings, 0 replies; 169+ results
From: Konstantin Ananyev @ 2025-01-09 15:31 UTC (permalink / raw)
  To: Andre Muezerie, Konstantin Ananyev, Vladimir Medvedkin; +Cc: dev



> Removed VLAs for compatibility with MSVC (which does not support them).
> Used alloca() where no suitable constant fixed length is known that
> could be used instead.
> 
> The implementation of rte_ipsec_pkt_crypto_group and
> rte_ipsec_ses_from_crypto was moved to the new file
> lib\ipsec\ipsec_group.c because these functions are exported from the
> shared library (lib\ipsec\version.map).
> 
> The implementation of rte_ipsec_pkt_crypto_prepare and
> rte_ipsec_pkt_process was moved to the new file lib\ipsec\ipsec.c
> because these functions are exported from the shared library
> (lib\ipsec\version.map).

Hmm... I am not sure I understand the rationale.
To me, making inline functions non-inline first of all means ABI/API
breakage, and it will most likely make things slower.
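
For reference, the VLA-to-alloca() conversion described in the quoted
commit message generally takes the following shape (hypothetical sketch;
the function and variable names are illustrative and not taken from
lib/ipsec):

#include <alloca.h>
#include <stdint.h>
#include <string.h>

void
process_bufs(uint32_t num)
{
	/* Before: a VLA, which MSVC rejects. */
	/* uint32_t tmp[num]; */

	/* After: alloca(), used when no constant upper bound is known.
	 * Like a VLA it is released on function return, but it gives no
	 * failure indication on stack overflow, which is one reason such
	 * conversions draw careful review. */
	uint32_t *tmp = alloca(num * sizeof(uint32_t));

	memset(tmp, 0, num * sizeof(uint32_t));
	/* ... fill and process tmp ... */
}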


^ permalink raw reply	[relevance 3%]

* Re: [PATCH RESEND v7 2/5] ethdev: fix skip valid port in probing callback
  2024-12-10  1:50  0%     ` lihuisong (C)
@ 2025-01-10  3:21  0%       ` lihuisong (C)
  2025-01-10 17:54  3%         ` Stephen Hemminger
  0 siblings, 1 reply; 169+ results
From: lihuisong (C) @ 2025-01-10  3:21 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: dev, fengchengwen, liuyonglong, andrew.rybchenko, Somnath Kotur,
	Ajit Khaparde, Dariusz Sosnowski, Suanming Mou, Matan Azrad,
	Ori Kam, Viacheslav Ovsiienko, ferruh.yigit, thomas

Hi Stephen,

Can you take a look at my reply below and reconsider this patch?

/Huisong

On 2024/12/10 9:50, lihuisong (C) wrote:
> Hi Ferruh, Stephen and Thomas,
>
> Can you take a look at this patch? After all, it is an issue in the
> ethdev layer.
> This is also the outcome of what we discussed with Thomas and Ferruh
> before.
> Please go back to this thread. If we don't need this patch, please let
> me know and I will drop it from my upstreaming list.
>
> /Huisong
>
>
> On 2024/9/29 13:52, Huisong Li wrote:
>> The event callback in an application may use the macro
>> RTE_ETH_FOREACH_DEV to iterate over all enabled ports and do something
>> (like verifying the port id validity) when it receives a probing event.
>> If the ethdev state of a port is not RTE_ETH_DEV_UNUSED, this port is
>> considered a valid port.
>>
>> However, this state is set to RTE_ETH_DEV_ATTACHED only after the
>> probing event has been pushed, which means the probing callback will
>> skip this port. And this assignment cannot be moved in front of the
>> probing notification. See
>> commit be8cd210379a ("ethdev: fix port probing notification")
>>
>> So this patch has to add a new state, RTE_ETH_DEV_ALLOCATED. The ethdev
>> state is set to RTE_ETH_DEV_ALLOCATED before pushing the probing event
>> and to RTE_ETH_DEV_ATTACHED once the port is definitely probed. A port
>> is valid if its device state is 'ALLOCATED' or 'ATTACHED'.
>>
>> In addition, the new state has to be placed behind 'REMOVED' to avoid
>> an ABI break. Fortunately, this ethdev state is internal and
>> applications cannot access it directly. So this patch encapsulates an
>> API, rte_eth_dev_is_used, for ethdev or PMDs to call, eliminating
>> concerns about comparing this state enum value directly.
>>
>> Fixes: be8cd210379a ("ethdev: fix port probing notification")
>> Cc: stable@dpdk.org
>>
>> Signed-off-by: Huisong Li <lihuisong@huawei.com>
>> Acked-by: Chengwen Feng <fengchengwen@huawei.com>
>> ---
>>   drivers/net/bnxt/bnxt_ethdev.c |  3 ++-
>>   drivers/net/mlx5/mlx5.c        |  2 +-
>>   lib/ethdev/ethdev_driver.c     | 13 ++++++++++---
>>   lib/ethdev/ethdev_driver.h     | 12 ++++++++++++
>>   lib/ethdev/ethdev_pci.h        |  2 +-
>>   lib/ethdev/rte_class_eth.c     |  2 +-
>>   lib/ethdev/rte_ethdev.c        |  4 ++--
>>   lib/ethdev/rte_ethdev.h        |  4 +++-
>>   lib/ethdev/version.map         |  1 +
>>   9 files changed, 33 insertions(+), 10 deletions(-)
>>
>> diff --git a/drivers/net/bnxt/bnxt_ethdev.c 
>> b/drivers/net/bnxt/bnxt_ethdev.c
>> index c6ad764813..7401dcd8b5 100644
>> --- a/drivers/net/bnxt/bnxt_ethdev.c
>> +++ b/drivers/net/bnxt/bnxt_ethdev.c
>> @@ -6612,7 +6612,8 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
>>         PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
>>   -    if (eth_dev->state != RTE_ETH_DEV_UNUSED)
>> +
>> +    if (rte_eth_dev_is_used(eth_dev->state))
>>           bnxt_dev_close_op(eth_dev);
>>         return 0;
>> diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
>> index 8d266b0e64..0df49e1f69 100644
>> --- a/drivers/net/mlx5/mlx5.c
>> +++ b/drivers/net/mlx5/mlx5.c
>> @@ -3371,7 +3371,7 @@ mlx5_eth_find_next(uint16_t port_id, struct 
>> rte_device *odev)
>>       while (port_id < RTE_MAX_ETHPORTS) {
>>           struct rte_eth_dev *dev = &rte_eth_devices[port_id];
>>   -        if (dev->state != RTE_ETH_DEV_UNUSED &&
>> +        if (rte_eth_dev_is_used(dev->state) &&
>>               dev->device &&
>>               (dev->device == odev ||
>>                (dev->device->driver &&
>> diff --git a/lib/ethdev/ethdev_driver.c b/lib/ethdev/ethdev_driver.c
>> index c335a25a82..a87dbb00ff 100644
>> --- a/lib/ethdev/ethdev_driver.c
>> +++ b/lib/ethdev/ethdev_driver.c
>> @@ -55,8 +55,8 @@ eth_dev_find_free_port(void)
>>       for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
>>           /* Using shared name field to find a free port. */
>>           if (eth_dev_shared_data->data[i].name[0] == '\0') {
>> -            RTE_ASSERT(rte_eth_devices[i].state ==
>> -                   RTE_ETH_DEV_UNUSED);
>> +            RTE_ASSERT(!rte_eth_dev_is_used(
>> +                    rte_eth_devices[i].state));
>>               return i;
>>           }
>>       }
>> @@ -221,11 +221,18 @@ rte_eth_dev_probing_finish(struct rte_eth_dev 
>> *dev)
>>       if (rte_eal_process_type() == RTE_PROC_SECONDARY)
>>           eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, 
>> dev);
>>   +    dev->state = RTE_ETH_DEV_ALLOCATED;
>>       rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
>>         dev->state = RTE_ETH_DEV_ATTACHED;
>>   }
>>   +bool rte_eth_dev_is_used(uint16_t dev_state)
>> +{
>> +    return dev_state == RTE_ETH_DEV_ALLOCATED ||
>> +        dev_state == RTE_ETH_DEV_ATTACHED;
>> +}
>> +
>>   int
>>   rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
>>   {
>> @@ -243,7 +250,7 @@ rte_eth_dev_release_port(struct rte_eth_dev 
>> *eth_dev)
>>       if (ret != 0)
>>           return ret;
>>   -    if (eth_dev->state != RTE_ETH_DEV_UNUSED)
>> +    if (rte_eth_dev_is_used(eth_dev->state))
>>           rte_eth_dev_callback_process(eth_dev,
>>                   RTE_ETH_EVENT_DESTROY, NULL);
>>   diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
>> index abed4784aa..aa35b65848 100644
>> --- a/lib/ethdev/ethdev_driver.h
>> +++ b/lib/ethdev/ethdev_driver.h
>> @@ -1704,6 +1704,18 @@ int rte_eth_dev_callback_process(struct 
>> rte_eth_dev *dev,
>>   __rte_internal
>>   void rte_eth_dev_probing_finish(struct rte_eth_dev *dev);
>>   +/**
>> + * Check if a Ethernet device state is used or not
>> + *
>> + * @param dev_state
>> + *   The state of the Ethernet device
>> + * @return
>> + *   - true if the state of the Ethernet device is allocated or 
>> attached
>> + *   - false if this state is neither allocated nor attached
>> + */
>> +__rte_internal
>> +bool rte_eth_dev_is_used(uint16_t dev_state);
>> +
>>   /**
>>    * Create memzone for HW rings.
>>    * malloc can't be used as the physical address is needed.
>> diff --git a/lib/ethdev/ethdev_pci.h b/lib/ethdev/ethdev_pci.h
>> index ec4f731270..05dec6716b 100644
>> --- a/lib/ethdev/ethdev_pci.h
>> +++ b/lib/ethdev/ethdev_pci.h
>> @@ -179,7 +179,7 @@ rte_eth_dev_pci_generic_remove(struct 
>> rte_pci_device *pci_dev,
>>        * eth device has been released.
>>        */
>>       if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
>> -        eth_dev->state == RTE_ETH_DEV_UNUSED)
>> +        !rte_eth_dev_is_used(eth_dev->state))
>>           return 0;
>>         if (dev_uninit) {
>> diff --git a/lib/ethdev/rte_class_eth.c b/lib/ethdev/rte_class_eth.c
>> index b52f1dd9f2..81e70670d9 100644
>> --- a/lib/ethdev/rte_class_eth.c
>> +++ b/lib/ethdev/rte_class_eth.c
>> @@ -118,7 +118,7 @@ eth_dev_match(const struct rte_eth_dev *edev,
>>       const struct rte_kvargs *kvlist = arg->kvlist;
>>       unsigned int pair;
>>   -    if (edev->state == RTE_ETH_DEV_UNUSED)
>> +    if (!rte_eth_dev_is_used(edev->state))
>>           return -1;
>>       if (arg->device != NULL && arg->device != edev->device)
>>           return -1;
>> diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
>> index a1f7efa913..4dc66abb7b 100644
>> --- a/lib/ethdev/rte_ethdev.c
>> +++ b/lib/ethdev/rte_ethdev.c
>> @@ -349,7 +349,7 @@ uint16_t
>>   rte_eth_find_next(uint16_t port_id)
>>   {
>>       while (port_id < RTE_MAX_ETHPORTS &&
>> -            rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
>> + !rte_eth_dev_is_used(rte_eth_devices[port_id].state))
>>           port_id++;
>>         if (port_id >= RTE_MAX_ETHPORTS)
>> @@ -408,7 +408,7 @@ rte_eth_dev_is_valid_port(uint16_t port_id)
>>       int is_valid;
>>         if (port_id >= RTE_MAX_ETHPORTS ||
>> -        (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
>> +        !rte_eth_dev_is_used(rte_eth_devices[port_id].state))
>>           is_valid = 0;
>>       else
>>           is_valid = 1;
>> diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
>> index a9f92006da..9cc37e8cde 100644
>> --- a/lib/ethdev/rte_ethdev.h
>> +++ b/lib/ethdev/rte_ethdev.h
>> @@ -2083,10 +2083,12 @@ typedef uint16_t 
>> (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
>>   enum rte_eth_dev_state {
>>       /** Device is unused before being probed. */
>>       RTE_ETH_DEV_UNUSED = 0,
>> -    /** Device is attached when allocated in probing. */
>> +    /** Device is attached when definitely probed. */
>>       RTE_ETH_DEV_ATTACHED,
>>       /** Device is in removed state when plug-out is detected. */
>>       RTE_ETH_DEV_REMOVED,
>> +    /** Device is allocated and is set before reporting new event. */
>> +    RTE_ETH_DEV_ALLOCATED,
>>   };
>>     struct rte_eth_dev_sriov {
>> diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
>> index f63dc32aa2..6ecf1ab89d 100644
>> --- a/lib/ethdev/version.map
>> +++ b/lib/ethdev/version.map
>> @@ -349,6 +349,7 @@ INTERNAL {
>>       rte_eth_dev_get_by_name;
>>       rte_eth_dev_is_rx_hairpin_queue;
>>       rte_eth_dev_is_tx_hairpin_queue;
>> +    rte_eth_dev_is_used;
>>       rte_eth_dev_probing_finish;
>>       rte_eth_dev_release_port;
>>       rte_eth_dev_internal_reset;
> .
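
To make the scenario in the quoted commit message concrete, here is a
hedged sketch of the kind of application callback affected (the callback
body is illustrative; rte_eth_dev_callback_register() and
RTE_ETH_FOREACH_DEV are the documented ethdev APIs):

#include <stdbool.h>
#include <rte_ethdev.h>

/* Walks all ports considered valid when a NEW event fires. Without the
 * fix, the port that triggered the event is still in a state the
 * iterator treats as unused, so the loop skips it. */
static int
on_new_port(uint16_t port_id, enum rte_eth_event_type event,
	    void *cb_arg, void *ret_param)
{
	uint16_t pid;
	bool seen = false;

	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	RTE_ETH_FOREACH_DEV(pid) {
		if (pid == port_id)
			seen = true;
	}

	return seen ? 0 : -1;
}

/* Registered once during init, e.g.:
 * rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
 *                               on_new_port, NULL);
 */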

^ permalink raw reply	[relevance 0%]

* Re: [PATCH RESEND v7 2/5] ethdev: fix skip valid port in probing callback
  2025-01-10  3:21  0%       ` lihuisong (C)
@ 2025-01-10 17:54  3%         ` Stephen Hemminger
  2025-01-13  2:32  0%           ` lihuisong (C)
  0 siblings, 1 reply; 169+ results
From: Stephen Hemminger @ 2025-01-10 17:54 UTC (permalink / raw)
  To: lihuisong (C)
  Cc: dev, fengchengwen, liuyonglong, andrew.rybchenko, Somnath Kotur,
	Ajit Khaparde, Dariusz Sosnowski, Suanming Mou, Matan Azrad,
	Ori Kam, Viacheslav Ovsiienko, ferruh.yigit, thomas

On Fri, 10 Jan 2025 11:21:26 +0800
"lihuisong (C)" <lihuisong@huawei.com> wrote:

> Hi Stephen,
> 
> Can you take a look at my reply below and reconsider this patch?
> 
> /Huisong
> 
> On 2024/12/10 9:50, lihuisong (C) wrote:
> > Hi Ferruh, Stephen and Thomas,
> >
> > Can you take a look at this patch? After all, it is an issue in the
> > ethdev layer.
> > This is also the outcome of what we discussed with Thomas and Ferruh
> > before.
> > Please go back to this thread. If we don't need this patch, please let
> > me know and I will drop it from my upstreaming list.
> >
> > /Huisong
> >
> >
> > On 2024/9/29 13:52, Huisong Li wrote:
> >> The event callback in an application may use the macro
> >> RTE_ETH_FOREACH_DEV to iterate over all enabled ports and do something
> >> (like verifying the port id validity) when it receives a probing event.
> >> If the ethdev state of a port is not RTE_ETH_DEV_UNUSED, this port is
> >> considered a valid port.
> >>
> >> However, this state is set to RTE_ETH_DEV_ATTACHED only after the
> >> probing event has been pushed, which means the probing callback will
> >> skip this port. And this assignment cannot be moved in front of the
> >> probing notification. See
> >> commit be8cd210379a ("ethdev: fix port probing notification")
> >>
> >> So this patch has to add a new state, RTE_ETH_DEV_ALLOCATED. The ethdev
> >> state is set to RTE_ETH_DEV_ALLOCATED before pushing the probing event
> >> and to RTE_ETH_DEV_ATTACHED once the port is definitely probed. A port
> >> is valid if its device state is 'ALLOCATED' or 'ATTACHED'.
> >>
> >> In addition, the new state has to be placed behind 'REMOVED' to avoid
> >> an ABI break. Fortunately, this ethdev state is internal and
> >> applications cannot access it directly. So this patch encapsulates an
> >> API, rte_eth_dev_is_used, for ethdev or PMDs to call, eliminating
> >> concerns about comparing this state enum value directly.
> >>
> >> Fixes: be8cd210379a ("ethdev: fix port probing notification")
> >> Cc: stable@dpdk.org
> >>
> >> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> >> Acked-by: Chengwen Feng <fengchengwen@huawei.com>
> >> ---
> >>   drivers/net/bnxt/bnxt_ethdev.c |  3 ++-
> >>   drivers/net/mlx5/mlx5.c        |  2 +-
> >>   lib/ethdev/ethdev_driver.c     | 13 ++++++++++---
> >>   lib/ethdev/ethdev_driver.h     | 12 ++++++++++++
> >>   lib/ethdev/ethdev_pci.h        |  2 +-
> >>   lib/ethdev/rte_class_eth.c     |  2 +-
> >>   lib/ethdev/rte_ethdev.c        |  4 ++--
> >>   lib/ethdev/rte_ethdev.h        |  4 +++-
> >>   lib/ethdev/version.map         |  1 +
> >>   9 files changed, 33 insertions(+), 10 deletions(-)
> >>
> >> diff --git a/drivers/net/bnxt/bnxt_ethdev.c 
> >> b/drivers/net/bnxt/bnxt_ethdev.c
> >> index c6ad764813..7401dcd8b5 100644
> >> --- a/drivers/net/bnxt/bnxt_ethdev.c
> >> +++ b/drivers/net/bnxt/bnxt_ethdev.c
> >> @@ -6612,7 +6612,8 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
> >>         PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
> >>   -    if (eth_dev->state != RTE_ETH_DEV_UNUSED)
> >> +
> >> +    if (rte_eth_dev_is_used(eth_dev->state))
> >>           bnxt_dev_close_op(eth_dev);
> >>         return 0;
> >> diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
> >> index 8d266b0e64..0df49e1f69 100644
> >> --- a/drivers/net/mlx5/mlx5.c
> >> +++ b/drivers/net/mlx5/mlx5.c
> >> @@ -3371,7 +3371,7 @@ mlx5_eth_find_next(uint16_t port_id, struct 
> >> rte_device *odev)
> >>       while (port_id < RTE_MAX_ETHPORTS) {
> >>           struct rte_eth_dev *dev = &rte_eth_devices[port_id];
> >>   -        if (dev->state != RTE_ETH_DEV_UNUSED &&
> >> +        if (rte_eth_dev_is_used(dev->state) &&
> >>               dev->device &&
> >>               (dev->device == odev ||
> >>                (dev->device->driver &&
> >> diff --git a/lib/ethdev/ethdev_driver.c b/lib/ethdev/ethdev_driver.c
> >> index c335a25a82..a87dbb00ff 100644
> >> --- a/lib/ethdev/ethdev_driver.c
> >> +++ b/lib/ethdev/ethdev_driver.c
> >> @@ -55,8 +55,8 @@ eth_dev_find_free_port(void)
> >>       for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
> >>           /* Using shared name field to find a free port. */
> >>           if (eth_dev_shared_data->data[i].name[0] == '\0') {
> >> -            RTE_ASSERT(rte_eth_devices[i].state ==
> >> -                   RTE_ETH_DEV_UNUSED);
> >> +            RTE_ASSERT(!rte_eth_dev_is_used(
> >> +                    rte_eth_devices[i].state));
> >>               return i;
> >>           }
> >>       }
> >> @@ -221,11 +221,18 @@ rte_eth_dev_probing_finish(struct rte_eth_dev 
> >> *dev)
> >>       if (rte_eal_process_type() == RTE_PROC_SECONDARY)
> >>           eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, 
> >> dev);
> >>   +    dev->state = RTE_ETH_DEV_ALLOCATED;
> >>       rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
> >>         dev->state = RTE_ETH_DEV_ATTACHED;
> >>   }
> >>   +bool rte_eth_dev_is_used(uint16_t dev_state)
> >> +{
> >> +    return dev_state == RTE_ETH_DEV_ALLOCATED ||
> >> +        dev_state == RTE_ETH_DEV_ATTACHED;
> >> +}
> >> +
> >>   int
> >>   rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
> >>   {
> >> @@ -243,7 +250,7 @@ rte_eth_dev_release_port(struct rte_eth_dev 
> >> *eth_dev)
> >>       if (ret != 0)
> >>           return ret;
> >>   -    if (eth_dev->state != RTE_ETH_DEV_UNUSED)
> >> +    if (rte_eth_dev_is_used(eth_dev->state))
> >>           rte_eth_dev_callback_process(eth_dev,
> >>                   RTE_ETH_EVENT_DESTROY, NULL);
> >>   diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
> >> index abed4784aa..aa35b65848 100644
> >> --- a/lib/ethdev/ethdev_driver.h
> >> +++ b/lib/ethdev/ethdev_driver.h
> >> @@ -1704,6 +1704,18 @@ int rte_eth_dev_callback_process(struct 
> >> rte_eth_dev *dev,
> >>   __rte_internal
> >>   void rte_eth_dev_probing_finish(struct rte_eth_dev *dev);
> >>   +/**
> >> + * Check if a Ethernet device state is used or not
> >> + *
> >> + * @param dev_state
> >> + *   The state of the Ethernet device
> >> + * @return
> >> + *   - true if the state of the Ethernet device is allocated or 
> >> attached
> >> + *   - false if this state is neither allocated nor attached
> >> + */
> >> +__rte_internal
> >> +bool rte_eth_dev_is_used(uint16_t dev_state);
> >> +
> >>   /**
> >>    * Create memzone for HW rings.
> >>    * malloc can't be used as the physical address is needed.
> >> diff --git a/lib/ethdev/ethdev_pci.h b/lib/ethdev/ethdev_pci.h
> >> index ec4f731270..05dec6716b 100644
> >> --- a/lib/ethdev/ethdev_pci.h
> >> +++ b/lib/ethdev/ethdev_pci.h
> >> @@ -179,7 +179,7 @@ rte_eth_dev_pci_generic_remove(struct 
> >> rte_pci_device *pci_dev,
> >>        * eth device has been released.
> >>        */
> >>       if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
> >> -        eth_dev->state == RTE_ETH_DEV_UNUSED)
> >> +        !rte_eth_dev_is_used(eth_dev->state))
> >>           return 0;
> >>         if (dev_uninit) {
> >> diff --git a/lib/ethdev/rte_class_eth.c b/lib/ethdev/rte_class_eth.c
> >> index b52f1dd9f2..81e70670d9 100644
> >> --- a/lib/ethdev/rte_class_eth.c
> >> +++ b/lib/ethdev/rte_class_eth.c
> >> @@ -118,7 +118,7 @@ eth_dev_match(const struct rte_eth_dev *edev,
> >>       const struct rte_kvargs *kvlist = arg->kvlist;
> >>       unsigned int pair;
> >>   -    if (edev->state == RTE_ETH_DEV_UNUSED)
> >> +    if (!rte_eth_dev_is_used(edev->state))
> >>           return -1;
> >>       if (arg->device != NULL && arg->device != edev->device)
> >>           return -1;
> >> diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
> >> index a1f7efa913..4dc66abb7b 100644
> >> --- a/lib/ethdev/rte_ethdev.c
> >> +++ b/lib/ethdev/rte_ethdev.c
> >> @@ -349,7 +349,7 @@ uint16_t
> >>   rte_eth_find_next(uint16_t port_id)
> >>   {
> >>       while (port_id < RTE_MAX_ETHPORTS &&
> >> -            rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
> >> + !rte_eth_dev_is_used(rte_eth_devices[port_id].state))
> >>           port_id++;
> >>         if (port_id >= RTE_MAX_ETHPORTS)
> >> @@ -408,7 +408,7 @@ rte_eth_dev_is_valid_port(uint16_t port_id)
> >>       int is_valid;
> >>         if (port_id >= RTE_MAX_ETHPORTS ||
> >> -        (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
> >> +        !rte_eth_dev_is_used(rte_eth_devices[port_id].state))
> >>           is_valid = 0;
> >>       else
> >>           is_valid = 1;
> >> diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
> >> index a9f92006da..9cc37e8cde 100644
> >> --- a/lib/ethdev/rte_ethdev.h
> >> +++ b/lib/ethdev/rte_ethdev.h
> >> @@ -2083,10 +2083,12 @@ typedef uint16_t 
> >> (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
> >>   enum rte_eth_dev_state {
> >>       /** Device is unused before being probed. */
> >>       RTE_ETH_DEV_UNUSED = 0,
> >> -    /** Device is attached when allocated in probing. */
> >> +    /** Device is attached when definitely probed. */
> >>       RTE_ETH_DEV_ATTACHED,
> >>       /** Device is in removed state when plug-out is detected. */
> >>       RTE_ETH_DEV_REMOVED,
> >> +    /** Device is allocated and is set before reporting new event. */
> >> +    RTE_ETH_DEV_ALLOCATED,
> >>   };
> >>     struct rte_eth_dev_sriov {
> >> diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
> >> index f63dc32aa2..6ecf1ab89d 100644
> >> --- a/lib/ethdev/version.map
> >> +++ b/lib/ethdev/version.map
> >> @@ -349,6 +349,7 @@ INTERNAL {
> >>       rte_eth_dev_get_by_name;
> >>       rte_eth_dev_is_rx_hairpin_queue;
> >>       rte_eth_dev_is_tx_hairpin_queue;
> >> +    rte_eth_dev_is_used;
> >>       rte_eth_dev_probing_finish;
> >>       rte_eth_dev_release_port;
> >>       rte_eth_dev_internal_reset;  
> > .  

Please resubmit for the 25.03 release.
But it looks like an API/ABI change, since rte_eth_dev_state is visible
to applications.

A more detailed bug report would also help.
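
The enum-placement aspect is easy to see in isolation (a sketch
mirroring the quoted rte_eth_dev_state change): appending the new
enumerator keeps every pre-existing constant stable, which is why the
patch places RTE_ETH_DEV_ALLOCATED behind RTE_ETH_DEV_REMOVED even
though it logically precedes RTE_ETH_DEV_ATTACHED.

enum dev_state_sketch {
	DEV_UNUSED = 0,   /* existing value, stays 0 */
	DEV_ATTACHED,     /* existing value, stays 1 */
	DEV_REMOVED,      /* existing value, stays 2 */
	DEV_ALLOCATED,    /* new value appended as 3; earlier
	                   * enumerators keep their ABI values */
};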

^ permalink raw reply	[relevance 3%]

* [PATCH v11 27/30] net: replace packed attributes
  @ 2025-01-10 22:16  1%   ` Andre Muezerie
  0 siblings, 0 replies; 169+ results
From: Andre Muezerie @ 2025-01-10 22:16 UTC (permalink / raw)
  To: roretzla
  Cc: aman.deep.singh, anatoly.burakov, bruce.richardson, byron.marohn,
	conor.walsh, cristian.dumitrescu, david.hunt, dev, dsosnowski,
	gakhil, jerinj, jingjing.wu, kirill.rybalchenko,
	konstantin.v.ananyev, matan, mb, orika, radu.nicolau,
	ruifeng.wang, sameh.gobriel, sivaprasad.tummala, skori, stephen,
	suanmingm, vattunuru, viacheslavo, vladimir.medvedkin,
	yipeng1.wang, Andre Muezerie

MSVC struct packing is not compatible with GCC. Replace the macro
__rte_packed with __rte_packed_begin, which pushes the existing pack
value and sets packing to 1 byte, and __rte_packed_end, which restores
the pack value in effect prior to the push.

Macro __rte_packed_end is deliberately utilized to trigger an MSVC
compiler warning if no existing packing has been pushed, allowing easy
identification of locations where __rte_packed_begin is missing.
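
A rough sketch of how such a macro pair can be defined (illustrative
only; the authoritative definitions live in rte_common.h and may differ
in detail):

#include <stdint.h>

#ifdef RTE_TOOLCHAIN_MSVC
#define __rte_packed_begin __pragma(pack(push, 1))
#define __rte_packed_end   __pragma(pack(pop))
#else
#define __rte_packed_begin
#define __rte_packed_end   __attribute__((__packed__))
#endif

/* Usage pattern adopted throughout this series: */
struct __rte_packed_begin example_hdr {
	uint8_t  type;
	uint16_t length;  /* packed: no padding inserted before this field */
} __rte_packed_end;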

This change affects the storage size of a variable of enum
rte_ipv6_mc_scope (at least with gcc). It should be OK from an ABI POV
though: there is one (inline) helper using this type, and nothing else
in DPDK takes an IPv6 multicast scope as input.

Signed-off-by: Andre Muezerie <andremue@linux.microsoft.com>
---
 lib/net/rte_arp.h      |  8 ++++----
 lib/net/rte_dtls.h     |  4 ++--
 lib/net/rte_esp.h      |  8 ++++----
 lib/net/rte_geneve.h   |  4 ++--
 lib/net/rte_gre.h      | 16 ++++++++--------
 lib/net/rte_gtp.h      | 20 ++++++++++----------
 lib/net/rte_ib.h       |  4 ++--
 lib/net/rte_icmp.h     | 12 ++++++------
 lib/net/rte_ip4.h      |  4 ++--
 lib/net/rte_ip6.h      | 14 +++++++-------
 lib/net/rte_l2tpv2.h   | 16 ++++++++--------
 lib/net/rte_macsec.h   |  8 ++++----
 lib/net/rte_mpls.h     |  4 ++--
 lib/net/rte_pdcp_hdr.h | 16 ++++++++--------
 lib/net/rte_ppp.h      |  4 ++--
 lib/net/rte_sctp.h     |  4 ++--
 lib/net/rte_tcp.h      |  4 ++--
 lib/net/rte_tls.h      |  4 ++--
 lib/net/rte_udp.h      |  4 ++--
 lib/net/rte_vxlan.h    | 28 ++++++++++++++--------------
 20 files changed, 93 insertions(+), 93 deletions(-)

diff --git a/lib/net/rte_arp.h b/lib/net/rte_arp.h
index 668cea1704..e885a71292 100644
--- a/lib/net/rte_arp.h
+++ b/lib/net/rte_arp.h
@@ -21,17 +21,17 @@ extern "C" {
 /**
  * ARP header IPv4 payload.
  */
-struct __rte_aligned(2) rte_arp_ipv4 {
+struct __rte_aligned(2) __rte_packed_begin rte_arp_ipv4 {
 	struct rte_ether_addr arp_sha;  /**< sender hardware address */
 	rte_be32_t            arp_sip;  /**< sender IP address */
 	struct rte_ether_addr arp_tha;  /**< target hardware address */
 	rte_be32_t            arp_tip;  /**< target IP address */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * ARP header.
  */
-struct __rte_aligned(2) rte_arp_hdr {
+struct __rte_aligned(2) __rte_packed_begin rte_arp_hdr {
 	rte_be16_t arp_hardware; /**< format of hardware address */
 #define RTE_ARP_HRD_ETHER     1  /**< ARP Ethernet address format */
 
@@ -47,7 +47,7 @@ struct __rte_aligned(2) rte_arp_hdr {
 #define	RTE_ARP_OP_INVREPLY   9  /**< response identifying peer */
 
 	struct rte_arp_ipv4 arp_data;
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Make a RARP packet based on MAC addr.
diff --git a/lib/net/rte_dtls.h b/lib/net/rte_dtls.h
index 246cd8a72d..1dd95ce899 100644
--- a/lib/net/rte_dtls.h
+++ b/lib/net/rte_dtls.h
@@ -30,7 +30,7 @@
  * DTLS Header
  */
 __extension__
-struct rte_dtls_hdr {
+struct __rte_packed_begin rte_dtls_hdr {
 	/** Content type of DTLS packet. Defined as RTE_DTLS_TYPE_*. */
 	uint8_t type;
 	/** DTLS Version defined as RTE_DTLS_VERSION*. */
@@ -48,6 +48,6 @@ struct rte_dtls_hdr {
 #endif
 	/** The length (in bytes) of the following DTLS packet. */
 	rte_be16_t length;
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_DTLS_H */
diff --git a/lib/net/rte_esp.h b/lib/net/rte_esp.h
index 745a9847fe..2a0002f4d9 100644
--- a/lib/net/rte_esp.h
+++ b/lib/net/rte_esp.h
@@ -16,17 +16,17 @@
 /**
  * ESP Header
  */
-struct rte_esp_hdr {
+struct __rte_packed_begin rte_esp_hdr {
 	rte_be32_t spi;  /**< Security Parameters Index */
 	rte_be32_t seq;  /**< packet sequence number */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * ESP Trailer
  */
-struct rte_esp_tail {
+struct __rte_packed_begin rte_esp_tail {
 	uint8_t pad_len;     /**< number of pad bytes (0-255) */
 	uint8_t next_proto;  /**< IPv4 or IPv6 or next layer header */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_ESP_H_ */
diff --git a/lib/net/rte_geneve.h b/lib/net/rte_geneve.h
index eb2c85f1e9..f962c587ee 100644
--- a/lib/net/rte_geneve.h
+++ b/lib/net/rte_geneve.h
@@ -34,7 +34,7 @@
  * More-bits (optional) variable length options.
  */
 __extension__
-struct rte_geneve_hdr {
+struct __rte_packed_begin rte_geneve_hdr {
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	uint8_t ver:2;		/**< Version. */
 	uint8_t opt_len:6;	/**< Options length. */
@@ -52,7 +52,7 @@ struct rte_geneve_hdr {
 	uint8_t vni[3];		/**< Virtual network identifier. */
 	uint8_t reserved2;	/**< Reserved. */
 	uint32_t opts[];	/**< Variable length options. */
-} __rte_packed;
+} __rte_packed_end;
 
 /* GENEVE ETH next protocol types */
 #define RTE_GENEVE_TYPE_ETH	0x6558 /**< Ethernet Protocol. */
diff --git a/lib/net/rte_gre.h b/lib/net/rte_gre.h
index 1483e1b42d..768c4ce7b5 100644
--- a/lib/net/rte_gre.h
+++ b/lib/net/rte_gre.h
@@ -23,7 +23,7 @@
  * GRE Header
  */
 __extension__
-struct rte_gre_hdr {
+struct __rte_packed_begin rte_gre_hdr {
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint16_t res2:4; /**< Reserved */
 	uint16_t s:1;    /**< Sequence Number Present bit */
@@ -42,28 +42,28 @@ struct rte_gre_hdr {
 	uint16_t ver:3;  /**< Version Number */
 #endif
 	rte_be16_t proto;  /**< Protocol Type */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Optional field checksum in GRE header
  */
-struct rte_gre_hdr_opt_checksum_rsvd {
+struct __rte_packed_begin rte_gre_hdr_opt_checksum_rsvd {
 	rte_be16_t checksum;
 	rte_be16_t reserved1;
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Optional field key in GRE header
  */
-struct rte_gre_hdr_opt_key {
+struct __rte_packed_begin rte_gre_hdr_opt_key {
 	rte_be32_t key;
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Optional field sequence in GRE header
  */
-struct rte_gre_hdr_opt_sequence {
+struct __rte_packed_begin rte_gre_hdr_opt_sequence {
 	rte_be32_t sequence;
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_GRE_H_ */
diff --git a/lib/net/rte_gtp.h b/lib/net/rte_gtp.h
index ab06e23a6e..0332d35c16 100644
--- a/lib/net/rte_gtp.h
+++ b/lib/net/rte_gtp.h
@@ -24,7 +24,7 @@
  * No optional fields and next extension header.
  */
 __extension__
-struct rte_gtp_hdr {
+struct __rte_packed_begin rte_gtp_hdr {
 	union {
 		uint8_t gtp_hdr_info; /**< GTP header info */
 		struct {
@@ -48,21 +48,21 @@ struct rte_gtp_hdr {
 	uint8_t msg_type;     /**< GTP message type */
 	rte_be16_t plen;      /**< Total payload length */
 	rte_be32_t teid;      /**< Tunnel endpoint ID */
-} __rte_packed;
+} __rte_packed_end;
 
 /* Optional word of GTP header, present if any of E, S, PN is set. */
-struct rte_gtp_hdr_ext_word {
+struct __rte_packed_begin rte_gtp_hdr_ext_word {
 	rte_be16_t sqn;	      /**< Sequence Number. */
 	uint8_t npdu;	      /**< N-PDU number. */
 	uint8_t next_ext;     /**< Next Extension Header Type. */
-}  __rte_packed;
+}  __rte_packed_end;
 
 /**
  * Optional extension for GTP with next_ext set to 0x85
  * defined based on RFC 38415-g30.
  */
 __extension__
-struct rte_gtp_psc_generic_hdr {
+struct __rte_packed_begin rte_gtp_psc_generic_hdr {
 	uint8_t ext_hdr_len;	/**< PDU ext hdr len in multiples of 4 bytes */
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	uint8_t type:4;		/**< PDU type */
@@ -78,14 +78,14 @@ struct rte_gtp_psc_generic_hdr {
 	uint8_t spare:2;	/**< type specific spare bits */
 #endif
 	uint8_t data[0];	/**< variable length data fields */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Optional extension for GTP with next_ext set to 0x85
  * type0 defined based on RFC 38415-g30
  */
 __extension__
-struct rte_gtp_psc_type0_hdr {
+struct __rte_packed_begin rte_gtp_psc_type0_hdr {
 	uint8_t ext_hdr_len;	/**< PDU ext hdr len in multiples of 4 bytes */
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	uint8_t type:4;		/**< PDU type */
@@ -105,14 +105,14 @@ struct rte_gtp_psc_type0_hdr {
 	uint8_t ppp:1;		/**< Paging policy presence */
 #endif
 	uint8_t data[0];	/**< variable length data fields */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Optional extension for GTP with next_ext set to 0x85
  * type1 defined based on RFC 38415-g30
  */
 __extension__
-struct rte_gtp_psc_type1_hdr {
+struct __rte_packed_begin rte_gtp_psc_type1_hdr {
 	uint8_t ext_hdr_len;	/**< PDU ext hdr len in multiples of 4 bytes */
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	uint8_t type:4;		/**< PDU type */
@@ -134,7 +134,7 @@ struct rte_gtp_psc_type1_hdr {
 	uint8_t n_delay_ind:1;	/**< N3/N9 delay result presence */
 #endif
 	uint8_t data[0];	/**< variable length data fields */
-} __rte_packed;
+} __rte_packed_end;
 
 /** GTP header length */
 #define RTE_ETHER_GTP_HLEN \
diff --git a/lib/net/rte_ib.h b/lib/net/rte_ib.h
index a551f3753f..f1b455cea0 100644
--- a/lib/net/rte_ib.h
+++ b/lib/net/rte_ib.h
@@ -22,7 +22,7 @@
  * IB Specification Vol 1-Release-1.4.
  */
 __extension__
-struct rte_ib_bth {
+struct __rte_packed_begin rte_ib_bth {
 	uint8_t	opcode;		/**< Opcode. */
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint8_t	tver:4;		/**< Transport Header Version. */
@@ -54,7 +54,7 @@ struct rte_ib_bth {
 	uint8_t	rsvd1:7;	/**< Reserved. */
 #endif
 	uint8_t	psn[3];		/**< Packet Sequence Number */
-} __rte_packed;
+} __rte_packed_end;
 
 /** RoCEv2 default port. */
 #define RTE_ROCEV2_DEFAULT_PORT 4791
diff --git a/lib/net/rte_icmp.h b/lib/net/rte_icmp.h
index e69d68ab6e..cca73b3733 100644
--- a/lib/net/rte_icmp.h
+++ b/lib/net/rte_icmp.h
@@ -21,33 +21,33 @@
 /**
  * ICMP base header
  */
-struct rte_icmp_base_hdr {
+struct __rte_packed_begin rte_icmp_base_hdr {
 	uint8_t type;
 	uint8_t code;
 	rte_be16_t checksum;
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * ICMP echo header
  */
-struct rte_icmp_echo_hdr {
+struct __rte_packed_begin rte_icmp_echo_hdr {
 	struct rte_icmp_base_hdr base;
 	rte_be16_t identifier;
 	rte_be16_t sequence;
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * ICMP Header
  *
  * @see rte_icmp_echo_hdr which is similar.
  */
-struct rte_icmp_hdr {
+struct __rte_packed_begin rte_icmp_hdr {
 	uint8_t  icmp_type;     /* ICMP packet type. */
 	uint8_t  icmp_code;     /* ICMP packet code. */
 	rte_be16_t icmp_cksum;  /* ICMP packet checksum. */
 	rte_be16_t icmp_ident;  /* ICMP packet identifier. */
 	rte_be16_t icmp_seq_nb; /* ICMP packet sequence number. */
-} __rte_packed;
+} __rte_packed_end;
 
 /* ICMP packet types */
 #define RTE_ICMP_TYPE_ECHO_REPLY 0
diff --git a/lib/net/rte_ip4.h b/lib/net/rte_ip4.h
index f9b8333332..d4b38c513c 100644
--- a/lib/net/rte_ip4.h
+++ b/lib/net/rte_ip4.h
@@ -39,7 +39,7 @@ extern "C" {
 /**
  * IPv4 Header
  */
-struct __rte_aligned(2) rte_ipv4_hdr {
+struct __rte_aligned(2) __rte_packed_begin rte_ipv4_hdr {
 	__extension__
 	union {
 		uint8_t version_ihl;    /**< version and header length */
@@ -62,7 +62,7 @@ struct __rte_aligned(2) rte_ipv4_hdr {
 	rte_be16_t hdr_checksum;	/**< header checksum */
 	rte_be32_t src_addr;		/**< source address */
 	rte_be32_t dst_addr;		/**< destination address */
-} __rte_packed;
+} __rte_packed_end;
 
 /** Create IPv4 address */
 #define RTE_IPV4(a, b, c, d) ((uint32_t)(((a) & 0xff) << 24) | \
diff --git a/lib/net/rte_ip6.h b/lib/net/rte_ip6.h
index 992ab5ee1f..92558a124a 100644
--- a/lib/net/rte_ip6.h
+++ b/lib/net/rte_ip6.h
@@ -358,7 +358,7 @@ enum rte_ipv6_mc_scope {
 	RTE_IPV6_MC_SCOPE_ORGLOCAL = 0x08,
 	/** Global multicast scope. */
 	RTE_IPV6_MC_SCOPE_GLOBAL = 0x0e,
-} __rte_packed;
+};
 
 /**
  * Extract the IPv6 multicast scope value as defined in RFC 4291, section 2.7.
@@ -461,7 +461,7 @@ rte_ether_mcast_from_ipv6(struct rte_ether_addr *mac, const struct rte_ipv6_addr
 /**
  * IPv6 Header
  */
-struct __rte_aligned(2) rte_ipv6_hdr {
+struct __rte_aligned(2) __rte_packed_begin rte_ipv6_hdr {
 	union {
 		rte_be32_t vtc_flow;        /**< IP version, traffic class & flow label. */
 		__extension__
@@ -484,7 +484,7 @@ struct __rte_aligned(2) rte_ipv6_hdr {
 	uint8_t  hop_limits;	/**< Hop limits. */
 	struct rte_ipv6_addr src_addr;	/**< IP address of source host. */
 	struct rte_ipv6_addr dst_addr;	/**< IP address of destination host(s). */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * Check that the IPv6 header version field is valid according to RFC 8200 section 3.
@@ -508,7 +508,7 @@ static inline int rte_ipv6_check_version(const struct rte_ipv6_hdr *ip)
 /**
  * IPv6 Routing Extension Header
  */
-struct __rte_aligned(2) rte_ipv6_routing_ext {
+struct __rte_aligned(2) __rte_packed_begin rte_ipv6_routing_ext {
 	uint8_t next_hdr;			/**< Protocol, next header. */
 	uint8_t hdr_len;			/**< Header length. */
 	uint8_t type;				/**< Extension header type. */
@@ -523,7 +523,7 @@ struct __rte_aligned(2) rte_ipv6_routing_ext {
 		};
 	};
 	/* Next are 128-bit IPv6 address fields to describe segments. */
-} __rte_packed;
+} __rte_packed_end;
 
 /* IPv6 vtc_flow: IPv / TC / flow_label */
 #define RTE_IPV6_HDR_FL_SHIFT 0
@@ -752,12 +752,12 @@ rte_ipv6_udptcp_cksum_mbuf_verify(const struct rte_mbuf *m,
 #define RTE_IPV6_SET_FRAG_DATA(fo, mf)	\
 	(((fo) & RTE_IPV6_EHDR_FO_MASK) | ((mf) & RTE_IPV6_EHDR_MF_MASK))
 
-struct __rte_aligned(2) rte_ipv6_fragment_ext {
+struct __rte_aligned(2) __rte_packed_begin rte_ipv6_fragment_ext {
 	uint8_t next_header;	/**< Next header type */
 	uint8_t reserved;	/**< Reserved */
 	rte_be16_t frag_data;	/**< All fragmentation data */
 	rte_be32_t id;		/**< Packet ID */
-} __rte_packed;
+} __rte_packed_end;
 
 /* IPv6 fragment extension header size */
 #define RTE_IPV6_FRAG_HDR_SIZE	sizeof(struct rte_ipv6_fragment_ext)
diff --git a/lib/net/rte_l2tpv2.h b/lib/net/rte_l2tpv2.h
index ac16657856..728dc01506 100644
--- a/lib/net/rte_l2tpv2.h
+++ b/lib/net/rte_l2tpv2.h
@@ -125,7 +125,7 @@ struct rte_l2tpv2_common_hdr {
  * L2TPv2 message Header contains all options(length, ns, nr,
  * offset size, offset padding).
  */
-struct rte_l2tpv2_msg_with_all_options {
+struct __rte_packed_begin rte_l2tpv2_msg_with_all_options {
 	rte_be16_t length;		/**< length(16) */
 	rte_be16_t tunnel_id;		/**< tunnel ID(16) */
 	rte_be16_t session_id;		/**< session ID(16) */
@@ -133,20 +133,20 @@ struct rte_l2tpv2_msg_with_all_options {
 	rte_be16_t nr;			/**< Nr(16) */
 	rte_be16_t offset_size;		/**< offset size(16) */
 	uint8_t   *offset_padding;	/**< offset padding(variable length) */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * L2TPv2 message Header contains all options except length(ns, nr,
  * offset size, offset padding).
  */
-struct rte_l2tpv2_msg_without_length {
+struct __rte_packed_begin rte_l2tpv2_msg_without_length {
 	rte_be16_t tunnel_id;		/**< tunnel ID(16) */
 	rte_be16_t session_id;		/**< session ID(16) */
 	rte_be16_t ns;			/**< Ns(16) */
 	rte_be16_t nr;			/**< Nr(16) */
 	rte_be16_t offset_size;		/**< offset size(16) */
 	uint8_t   *offset_padding;	/**< offset padding(variable length) */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * L2TPv2 message Header contains all options except ns_nr(length,
@@ -176,12 +176,12 @@ struct rte_l2tpv2_msg_without_offset {
 /**
  * L2TPv2 message Header contains options offset size and offset padding.
  */
-struct rte_l2tpv2_msg_with_offset {
+struct __rte_packed_begin rte_l2tpv2_msg_with_offset {
 	rte_be16_t tunnel_id;		/**< tunnel ID(16) */
 	rte_be16_t session_id;		/**< session ID(16) */
 	rte_be16_t offset_size;		/**< offset size(16) */
 	uint8_t   *offset_padding;	/**< offset padding(variable length) */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * L2TPv2 message Header contains options ns and nr.
@@ -213,7 +213,7 @@ struct rte_l2tpv2_msg_without_all_options {
 /**
  * L2TPv2 Combined Message Header Format: Common Header + Options
  */
-struct rte_l2tpv2_combined_msg_hdr {
+struct __rte_packed_begin rte_l2tpv2_combined_msg_hdr {
 	struct rte_l2tpv2_common_hdr common; /**< common header */
 	union {
 		/** header with all options */
@@ -233,6 +233,6 @@ struct rte_l2tpv2_combined_msg_hdr {
 		/** header without all options */
 		struct rte_l2tpv2_msg_without_all_options type7;
 	};
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* _RTE_L2TPV2_H_ */
diff --git a/lib/net/rte_macsec.h b/lib/net/rte_macsec.h
index beeeb8effe..c694c37b4b 100644
--- a/lib/net/rte_macsec.h
+++ b/lib/net/rte_macsec.h
@@ -25,7 +25,7 @@
  * MACsec Header (SecTAG)
  */
 __extension__
-struct rte_macsec_hdr {
+struct __rte_packed_begin rte_macsec_hdr {
 	/**
 	 * Tag control information and Association number of secure channel.
 	 * Various bits of TCI and AN are masked using RTE_MACSEC_TCI_* and RTE_MACSEC_AN_MASK.
@@ -39,7 +39,7 @@ struct rte_macsec_hdr {
 	uint8_t short_length:6; /**< Short Length. */
 #endif
 	rte_be32_t packet_number; /**< Packet number to support replay protection. */
-} __rte_packed;
+} __rte_packed_end;
 
 /** SCI length in MACsec header if present. */
 #define RTE_MACSEC_SCI_LEN 8
@@ -48,8 +48,8 @@ struct rte_macsec_hdr {
  * MACsec SCI header (8 bytes) after the MACsec header
  * which is present if SC bit is set in tci_an.
  */
-struct rte_macsec_sci_hdr {
+struct __rte_packed_begin rte_macsec_sci_hdr {
 	uint8_t sci[RTE_MACSEC_SCI_LEN]; /**< Optional secure channel ID. */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_MACSEC_H */
diff --git a/lib/net/rte_mpls.h b/lib/net/rte_mpls.h
index 35a356efd3..53614a0b88 100644
--- a/lib/net/rte_mpls.h
+++ b/lib/net/rte_mpls.h
@@ -18,7 +18,7 @@
  * MPLS header.
  */
 __extension__
-struct rte_mpls_hdr {
+struct __rte_packed_begin rte_mpls_hdr {
 	rte_be16_t tag_msb; /**< Label(msb). */
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	uint8_t tag_lsb:4;  /**< Label(lsb). */
@@ -30,6 +30,6 @@ struct rte_mpls_hdr {
 	uint8_t tag_lsb:4;  /**< label(lsb) */
 #endif
 	uint8_t  ttl;       /**< Time to live. */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_MPLS_H_ */
diff --git a/lib/net/rte_pdcp_hdr.h b/lib/net/rte_pdcp_hdr.h
index c22b66bf93..2e8da1e1d3 100644
--- a/lib/net/rte_pdcp_hdr.h
+++ b/lib/net/rte_pdcp_hdr.h
@@ -56,7 +56,7 @@ enum rte_pdcp_pdu_type {
  * 6.2.2.1 Data PDU for SRBs
  */
 __extension__
-struct rte_pdcp_cp_data_pdu_sn_12_hdr {
+struct __rte_packed_begin rte_pdcp_cp_data_pdu_sn_12_hdr {
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint8_t sn_11_8 : 4;	/**< Sequence number bits 8-11 */
 	uint8_t r : 4;		/**< Reserved */
@@ -65,13 +65,13 @@ struct rte_pdcp_cp_data_pdu_sn_12_hdr {
 	uint8_t sn_11_8 : 4;	/**< Sequence number bits 8-11 */
 #endif
 	uint8_t sn_7_0;		/**< Sequence number bits 0-7 */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * 6.2.2.2 Data PDU for DRBs and MRBs with 12 bits PDCP SN
  */
 __extension__
-struct rte_pdcp_up_data_pdu_sn_12_hdr {
+struct __rte_packed_begin rte_pdcp_up_data_pdu_sn_12_hdr {
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint8_t sn_11_8 : 4;	/**< Sequence number bits 8-11 */
 	uint8_t r : 3;		/**< Reserved */
@@ -82,13 +82,13 @@ struct rte_pdcp_up_data_pdu_sn_12_hdr {
 	uint8_t sn_11_8 : 4;	/**< Sequence number bits 8-11 */
 #endif
 	uint8_t sn_7_0;		/**< Sequence number bits 0-7 */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * 6.2.2.3 Data PDU for DRBs and MRBs with 18 bits PDCP SN
  */
 __extension__
-struct rte_pdcp_up_data_pdu_sn_18_hdr {
+struct __rte_packed_begin rte_pdcp_up_data_pdu_sn_18_hdr {
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint8_t sn_17_16 : 2;	/**< Sequence number bits 16-17 */
 	uint8_t r : 5;		/**< Reserved */
@@ -100,13 +100,13 @@ struct rte_pdcp_up_data_pdu_sn_18_hdr {
 #endif
 	uint8_t sn_15_8;	/**< Sequence number bits 8-15 */
 	uint8_t sn_7_0;		/**< Sequence number bits 0-7 */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * 6.2.3.1 Control PDU for PDCP status report
  */
 __extension__
-struct rte_pdcp_up_ctrl_pdu_hdr {
+struct __rte_packed_begin rte_pdcp_up_ctrl_pdu_hdr {
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 	uint8_t r : 4;		/**< Reserved */
 	uint8_t pdu_type : 3;	/**< Control PDU type */
@@ -134,6 +134,6 @@ struct rte_pdcp_up_ctrl_pdu_hdr {
 	 * in the Bitmap is 1.
 	 */
 	uint8_t bitmap[];
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_PDCP_HDR_H */
diff --git a/lib/net/rte_ppp.h b/lib/net/rte_ppp.h
index 63c72a9392..02bfb03c03 100644
--- a/lib/net/rte_ppp.h
+++ b/lib/net/rte_ppp.h
@@ -17,10 +17,10 @@
 /**
  * PPP Header
  */
-struct rte_ppp_hdr {
+struct __rte_packed_begin rte_ppp_hdr {
 	uint8_t addr; /**< PPP address(8) */
 	uint8_t ctrl; /**< PPP control(8) */
 	rte_be16_t proto_id; /**< PPP protocol identifier(16) */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* _RTE_PPP_H_ */
diff --git a/lib/net/rte_sctp.h b/lib/net/rte_sctp.h
index e757c57db3..73051b94fd 100644
--- a/lib/net/rte_sctp.h
+++ b/lib/net/rte_sctp.h
@@ -21,11 +21,11 @@
 /**
  * SCTP Header
  */
-struct rte_sctp_hdr {
+struct __rte_packed_begin rte_sctp_hdr {
 	rte_be16_t src_port; /**< Source port. */
 	rte_be16_t dst_port; /**< Destin port. */
 	rte_be32_t tag;      /**< Validation tag. */
 	rte_be32_t cksum;    /**< Checksum. */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_SCTP_H_ */
diff --git a/lib/net/rte_tcp.h b/lib/net/rte_tcp.h
index 1bcacbf038..fb0eb308f5 100644
--- a/lib/net/rte_tcp.h
+++ b/lib/net/rte_tcp.h
@@ -21,7 +21,7 @@
 /**
  * TCP Header
  */
-struct rte_tcp_hdr {
+struct __rte_packed_begin rte_tcp_hdr {
 	rte_be16_t src_port; /**< TCP source port. */
 	rte_be16_t dst_port; /**< TCP destination port. */
 	rte_be32_t sent_seq; /**< TX data sequence number. */
@@ -31,7 +31,7 @@ struct rte_tcp_hdr {
 	rte_be16_t rx_win;   /**< RX flow control window. */
 	rte_be16_t cksum;    /**< TCP checksum. */
 	rte_be16_t tcp_urp;  /**< TCP urgent pointer, if any. */
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * TCP Flags
diff --git a/lib/net/rte_tls.h b/lib/net/rte_tls.h
index 595567e3e9..f27db3acb1 100644
--- a/lib/net/rte_tls.h
+++ b/lib/net/rte_tls.h
@@ -28,13 +28,13 @@
  * TLS Header
  */
 __extension__
-struct rte_tls_hdr {
+struct __rte_packed_begin rte_tls_hdr {
 	/** Content type of TLS packet. Defined as RTE_TLS_TYPE_*. */
 	uint8_t type;
 	/** TLS Version defined as RTE_TLS_VERSION*. */
 	rte_be16_t version;
 	/** The length (in bytes) of the following TLS packet. */
 	rte_be16_t length;
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_TLS_H */
diff --git a/lib/net/rte_udp.h b/lib/net/rte_udp.h
index c01dad9c9b..94f5304e6d 100644
--- a/lib/net/rte_udp.h
+++ b/lib/net/rte_udp.h
@@ -21,11 +21,11 @@
 /**
  * UDP Header
  */
-struct rte_udp_hdr {
+struct __rte_packed_begin rte_udp_hdr {
 	rte_be16_t src_port;    /**< UDP source port. */
 	rte_be16_t dst_port;    /**< UDP destination port. */
 	rte_be16_t dgram_len;   /**< UDP datagram length */
 	rte_be16_t dgram_cksum; /**< UDP datagram checksum */
-} __rte_packed;
+} __rte_packed_end;
 
 #endif /* RTE_UDP_H_ */
diff --git a/lib/net/rte_vxlan.h b/lib/net/rte_vxlan.h
index bd1c89835e..f59829b182 100644
--- a/lib/net/rte_vxlan.h
+++ b/lib/net/rte_vxlan.h
@@ -27,13 +27,13 @@
  * Reserved fields (24 bits and 8 bits)
  */
 __extension__ /* no named member in struct */
-struct rte_vxlan_hdr {
+struct __rte_packed_begin rte_vxlan_hdr {
 	union {
 		rte_be32_t vx_flags; /**< flags (8 bits) + extensions (24 bits). */
-		struct {
+		struct __rte_packed_begin {
 			union {
 				uint8_t flags; /**< Default is I bit, others are extensions. */
-				struct {
+				struct __rte_packed_begin {
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 					uint8_t flag_g:1,     /**< GBP bit. */
 						flag_rsvd:1,  /*   Reserved. */
@@ -51,11 +51,11 @@ struct rte_vxlan_hdr {
 						flag_rsvd:1,
 						flag_g:1;
 #endif
-				} __rte_packed;
+				} __rte_packed_end;
 			}; /* end of 1st byte */
 			union {
 				uint8_t rsvd0[3]; /* Reserved for extensions. */
-				struct {
+				struct __rte_packed_begin {
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 					uint8_t rsvd0_gbp1:1, /*   Reserved. */
 						flag_d:1,     /**< GBP Don't Learn bit. */
@@ -71,7 +71,7 @@ struct rte_vxlan_hdr {
 #endif
 					union {
 						uint16_t policy_id; /**< GBP Identifier. */
-						struct {
+						struct __rte_packed_begin {
 							uint8_t rsvd0_gpe; /* Reserved. */
 							uint8_t proto; /**< GPE Next protocol. */
 								/* 0x01 : IPv4
@@ -79,23 +79,23 @@ struct rte_vxlan_hdr {
 								 * 0x03 : Ethernet
 								 * 0x04 : Network Service Header
 								 */
-						} __rte_packed;
+						} __rte_packed_end;
 					};
-				} __rte_packed;
+				} __rte_packed_end;
 			};
-		} __rte_packed;
+		} __rte_packed_end;
 	}; /* end of 1st 32-bit word */
 	union {
 		rte_be32_t vx_vni; /**< VNI (24 bits) + reserved (8 bits). */
-		struct {
+		struct __rte_packed_begin {
 			uint8_t    vni[3];   /**< VXLAN Identifier. */
 			union {
 				uint8_t    rsvd1;        /**< Reserved. */
 				uint8_t    last_rsvd;    /**< Reserved. */
 			};
-		} __rte_packed;
+		} __rte_packed_end;
 	}; /* end of 2nd 32-bit word */
-} __rte_packed;
+} __rte_packed_end;
 
 /** VXLAN tunnel header length. */
 #define RTE_ETHER_VXLAN_HLEN \
@@ -111,7 +111,7 @@ struct rte_vxlan_hdr {
  * Identifier and Reserved fields (16 bits and 8 bits).
  */
 __extension__ /* no named member in struct */
-struct rte_vxlan_gpe_hdr {
+struct __rte_packed_begin rte_vxlan_gpe_hdr {
 	union {
 		struct {
 			uint8_t vx_flags;    /**< flag (8). */
@@ -127,7 +127,7 @@ struct rte_vxlan_gpe_hdr {
 			uint8_t rsvd1;    /**< Reserved. */
 		};
 	};
-} __rte_packed;
+} __rte_packed_end;
 
 /**
  * @deprecated
-- 
2.47.0.vfs.0.3


^ permalink raw reply	[relevance 1%]

* Re: [PATCH RESEND v7 2/5] ethdev: fix skip valid port in probing callback
  2025-01-10 17:54  3%         ` Stephen Hemminger
@ 2025-01-13  2:32  0%           ` lihuisong (C)
  0 siblings, 0 replies; 169+ results
From: lihuisong (C) @ 2025-01-13  2:32 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: dev, fengchengwen, liuyonglong, andrew.rybchenko, Somnath Kotur,
	Ajit Khaparde, Dariusz Sosnowski, Suanming Mou, Matan Azrad,
	Ori Kam, Viacheslav Ovsiienko, ferruh.yigit, thomas


On 2025/1/11 1:54, Stephen Hemminger wrote:
> On Fri, 10 Jan 2025 11:21:26 +0800
> "lihuisong (C)" <lihuisong@huawei.com> wrote:
>
>> Hi Stephen,
>>
>> Can you take a look at my reply below and reconsider this patch?
>>
>> /Huisong
>>
>> On 2024/12/10 9:50, lihuisong (C) wrote:
>>> Hi Ferruh, Stephen and Thomas,
>>>
>>> Can you take a look at this patch? After all, it is an issue in the
>>> ethdev layer.
>>> This is also the outcome of what we discussed with Thomas and Ferruh
>>> before.
>>> Please go back to this thread. If we don't need this patch, please let
>>> me know and I will drop it from my upstreaming list.
>>>
>>> /Huisong
>>>
>>>
>>> On 2024/9/29 13:52, Huisong Li wrote:
>>>> The event callback in an application may use the macro
>>>> RTE_ETH_FOREACH_DEV to iterate over all enabled ports to do something
>>>> (like verifying the port id validity) when receiving a probing event.
>>>> If the ethdev state of a port is not RTE_ETH_DEV_UNUSED, this port is
>>>> considered a valid port.
>>>>
>>>> However, this state is set to RTE_ETH_DEV_ATTACHED after pushing the
>>>> probing event. This means the probing callback will skip this port. But
>>>> this assignment cannot be moved before the probing notification. See
>>>> commit be8cd210379a ("ethdev: fix port probing notification")
>>>>
>>>> So this patch has to add a new state, RTE_ETH_DEV_ALLOCATED. Set the
>>>> ethdev state to RTE_ETH_DEV_ALLOCATED before pushing the probing event
>>>> and set it to RTE_ETH_DEV_ATTACHED once the device is definitely probed.
>>>> A port is then valid if its device state is 'ALLOCATED' or 'ATTACHED'.
>>>>
>>>> In addition, the new state has to be placed behind 'REMOVED' to avoid
>>>> an ABI break. Fortunately, this ethdev state is internal and
>>>> applications cannot access it directly. So this patch encapsulates an
>>>> API, rte_eth_dev_is_used, for the ethdev layer or PMDs to call,
>>>> eliminating concerns about comparing this state enum value directly.
>>>>
>>>> Fixes: be8cd210379a ("ethdev: fix port probing notification")
>>>> Cc: stable@dpdk.org
>>>>
>>>> Signed-off-by: Huisong Li <lihuisong@huawei.com>
>>>> Acked-by: Chengwen Feng <fengchengwen@huawei.com>
>>>> ---
>>>>    drivers/net/bnxt/bnxt_ethdev.c |  3 ++-
>>>>    drivers/net/mlx5/mlx5.c        |  2 +-
>>>>    lib/ethdev/ethdev_driver.c     | 13 ++++++++++---
>>>>    lib/ethdev/ethdev_driver.h     | 12 ++++++++++++
>>>>    lib/ethdev/ethdev_pci.h        |  2 +-
>>>>    lib/ethdev/rte_class_eth.c     |  2 +-
>>>>    lib/ethdev/rte_ethdev.c        |  4 ++--
>>>>    lib/ethdev/rte_ethdev.h        |  4 +++-
>>>>    lib/ethdev/version.map         |  1 +
>>>>    9 files changed, 33 insertions(+), 10 deletions(-)
>>>>
>>>> diff --git a/drivers/net/bnxt/bnxt_ethdev.c
>>>> b/drivers/net/bnxt/bnxt_ethdev.c
>>>> index c6ad764813..7401dcd8b5 100644
>>>> --- a/drivers/net/bnxt/bnxt_ethdev.c
>>>> +++ b/drivers/net/bnxt/bnxt_ethdev.c
>>>> @@ -6612,7 +6612,8 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
>>>>          PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
>>>>    -    if (eth_dev->state != RTE_ETH_DEV_UNUSED)
>>>> +
>>>> +    if (rte_eth_dev_is_used(eth_dev->state))
>>>>            bnxt_dev_close_op(eth_dev);
>>>>          return 0;
>>>> diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
>>>> index 8d266b0e64..0df49e1f69 100644
>>>> --- a/drivers/net/mlx5/mlx5.c
>>>> +++ b/drivers/net/mlx5/mlx5.c
>>>> @@ -3371,7 +3371,7 @@ mlx5_eth_find_next(uint16_t port_id, struct
>>>> rte_device *odev)
>>>>        while (port_id < RTE_MAX_ETHPORTS) {
>>>>            struct rte_eth_dev *dev = &rte_eth_devices[port_id];
>>>>    -        if (dev->state != RTE_ETH_DEV_UNUSED &&
>>>> +        if (rte_eth_dev_is_used(dev->state) &&
>>>>                dev->device &&
>>>>                (dev->device == odev ||
>>>>                 (dev->device->driver &&
>>>> diff --git a/lib/ethdev/ethdev_driver.c b/lib/ethdev/ethdev_driver.c
>>>> index c335a25a82..a87dbb00ff 100644
>>>> --- a/lib/ethdev/ethdev_driver.c
>>>> +++ b/lib/ethdev/ethdev_driver.c
>>>> @@ -55,8 +55,8 @@ eth_dev_find_free_port(void)
>>>>        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
>>>>            /* Using shared name field to find a free port. */
>>>>            if (eth_dev_shared_data->data[i].name[0] == '\0') {
>>>> -            RTE_ASSERT(rte_eth_devices[i].state ==
>>>> -                   RTE_ETH_DEV_UNUSED);
>>>> +            RTE_ASSERT(!rte_eth_dev_is_used(
>>>> +                    rte_eth_devices[i].state));
>>>>                return i;
>>>>            }
>>>>        }
>>>> @@ -221,11 +221,18 @@ rte_eth_dev_probing_finish(struct rte_eth_dev
>>>> *dev)
>>>>        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
>>>>            eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id,
>>>> dev);
>>>>    +    dev->state = RTE_ETH_DEV_ALLOCATED;
>>>>        rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
>>>>          dev->state = RTE_ETH_DEV_ATTACHED;
>>>>    }
>>>>    +bool rte_eth_dev_is_used(uint16_t dev_state)
>>>> +{
>>>> +    return dev_state == RTE_ETH_DEV_ALLOCATED ||
>>>> +        dev_state == RTE_ETH_DEV_ATTACHED;
>>>> +}
>>>> +
>>>>    int
>>>>    rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
>>>>    {
>>>> @@ -243,7 +250,7 @@ rte_eth_dev_release_port(struct rte_eth_dev
>>>> *eth_dev)
>>>>        if (ret != 0)
>>>>            return ret;
>>>>    -    if (eth_dev->state != RTE_ETH_DEV_UNUSED)
>>>> +    if (rte_eth_dev_is_used(eth_dev->state))
>>>>            rte_eth_dev_callback_process(eth_dev,
>>>>                    RTE_ETH_EVENT_DESTROY, NULL);
>>>>    diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
>>>> index abed4784aa..aa35b65848 100644
>>>> --- a/lib/ethdev/ethdev_driver.h
>>>> +++ b/lib/ethdev/ethdev_driver.h
>>>> @@ -1704,6 +1704,18 @@ int rte_eth_dev_callback_process(struct
>>>> rte_eth_dev *dev,
>>>>    __rte_internal
>>>>    void rte_eth_dev_probing_finish(struct rte_eth_dev *dev);
>>>>    +/**
>>>> + * Check if an Ethernet device state is used or not
>>>> + *
>>>> + * @param dev_state
>>>> + *   The state of the Ethernet device
>>>> + * @return
>>>> + *   - true if the state of the Ethernet device is allocated or
>>>> attached
>>>> + *   - false if this state is neither allocated nor attached
>>>> + */
>>>> +__rte_internal
>>>> +bool rte_eth_dev_is_used(uint16_t dev_state);
>>>> +
>>>>    /**
>>>>     * Create memzone for HW rings.
>>>>     * malloc can't be used as the physical address is needed.
>>>> diff --git a/lib/ethdev/ethdev_pci.h b/lib/ethdev/ethdev_pci.h
>>>> index ec4f731270..05dec6716b 100644
>>>> --- a/lib/ethdev/ethdev_pci.h
>>>> +++ b/lib/ethdev/ethdev_pci.h
>>>> @@ -179,7 +179,7 @@ rte_eth_dev_pci_generic_remove(struct
>>>> rte_pci_device *pci_dev,
>>>>         * eth device has been released.
>>>>         */
>>>>        if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
>>>> -        eth_dev->state == RTE_ETH_DEV_UNUSED)
>>>> +        !rte_eth_dev_is_used(eth_dev->state))
>>>>            return 0;
>>>>          if (dev_uninit) {
>>>> diff --git a/lib/ethdev/rte_class_eth.c b/lib/ethdev/rte_class_eth.c
>>>> index b52f1dd9f2..81e70670d9 100644
>>>> --- a/lib/ethdev/rte_class_eth.c
>>>> +++ b/lib/ethdev/rte_class_eth.c
>>>> @@ -118,7 +118,7 @@ eth_dev_match(const struct rte_eth_dev *edev,
>>>>        const struct rte_kvargs *kvlist = arg->kvlist;
>>>>        unsigned int pair;
>>>>    -    if (edev->state == RTE_ETH_DEV_UNUSED)
>>>> +    if (!rte_eth_dev_is_used(edev->state))
>>>>            return -1;
>>>>        if (arg->device != NULL && arg->device != edev->device)
>>>>            return -1;
>>>> diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
>>>> index a1f7efa913..4dc66abb7b 100644
>>>> --- a/lib/ethdev/rte_ethdev.c
>>>> +++ b/lib/ethdev/rte_ethdev.c
>>>> @@ -349,7 +349,7 @@ uint16_t
>>>>    rte_eth_find_next(uint16_t port_id)
>>>>    {
>>>>        while (port_id < RTE_MAX_ETHPORTS &&
>>>> -            rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
>>>> + !rte_eth_dev_is_used(rte_eth_devices[port_id].state))
>>>>            port_id++;
>>>>          if (port_id >= RTE_MAX_ETHPORTS)
>>>> @@ -408,7 +408,7 @@ rte_eth_dev_is_valid_port(uint16_t port_id)
>>>>        int is_valid;
>>>>          if (port_id >= RTE_MAX_ETHPORTS ||
>>>> -        (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
>>>> +        !rte_eth_dev_is_used(rte_eth_devices[port_id].state))
>>>>            is_valid = 0;
>>>>        else
>>>>            is_valid = 1;
>>>> diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
>>>> index a9f92006da..9cc37e8cde 100644
>>>> --- a/lib/ethdev/rte_ethdev.h
>>>> +++ b/lib/ethdev/rte_ethdev.h
>>>> @@ -2083,10 +2083,12 @@ typedef uint16_t
>>>> (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
>>>>    enum rte_eth_dev_state {
>>>>        /** Device is unused before being probed. */
>>>>        RTE_ETH_DEV_UNUSED = 0,
>>>> -    /** Device is attached when allocated in probing. */
>>>> +    /** Device is attached when definitely probed. */
>>>>        RTE_ETH_DEV_ATTACHED,
>>>>        /** Device is in removed state when plug-out is detected. */
>>>>        RTE_ETH_DEV_REMOVED,
>>>> +    /** Device is allocated and is set before reporting the new event. */
>>>> +    RTE_ETH_DEV_ALLOCATED,
>>>>    };
>>>>      struct rte_eth_dev_sriov {
>>>> diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
>>>> index f63dc32aa2..6ecf1ab89d 100644
>>>> --- a/lib/ethdev/version.map
>>>> +++ b/lib/ethdev/version.map
>>>> @@ -349,6 +349,7 @@ INTERNAL {
>>>>        rte_eth_dev_get_by_name;
>>>>        rte_eth_dev_is_rx_hairpin_queue;
>>>>        rte_eth_dev_is_tx_hairpin_queue;
>>>> +    rte_eth_dev_is_used;
>>>>        rte_eth_dev_probing_finish;
>>>>        rte_eth_dev_release_port;
>>>>        rte_eth_dev_internal_reset;
>>> .
> Please resubmit for 25.03 release.
> But it looks like an API/ABI change since rte_eth_dev_state is visible
> to applications.
>
> A more detailed bug report would also help
> .

OK, many thanks for your reply.

I will resubmit this patch and send it out separately.

And the series adding testpmd support for attaching and detaching ports
in multiple-process mode will be updated later.
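
As a starting point for that more detailed report, a rough reproduction
sketch (all names are illustrative; app_event_cb() stands for any
NEW-event handler that walks RTE_ETH_FOREACH_DEV, like the one sketched
in the v1 patch below):

#include <rte_dev.h>
#include <rte_ethdev.h>

extern int app_event_cb(uint16_t port_id, enum rte_eth_event_type event,
			void *cb_arg, void *ret_param);

static void
reproduce_skipped_port(void)
{
	/* Register for probing events before any port exists. */
	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
				      app_event_cb, NULL);
	/* Attach a port: the handler runs during probing, while the port
	 * is still RTE_ETH_DEV_UNUSED (without the fix), so the iterator
	 * misses the very port being reported. */
	rte_eal_hotplug_add("vdev", "net_null0", "");
}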


^ permalink raw reply	[relevance 0%]

* [PATCH v1 2/2] ethdev: fix skip valid port in probing callback
  @ 2025-01-13  2:55  2% ` Huisong Li
  0 siblings, 0 replies; 169+ results
From: Huisong Li @ 2025-01-13  2:55 UTC (permalink / raw)
  To: dev, stephen, thomas, ferruh.yigit, Ajit Khaparde, Somnath Kotur,
	Praveen Shetty, Andrew Boyer, Dariusz Sosnowski,
	Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou,
	Matan Azrad, Chaoyong He, Andrew Rybchenko
  Cc: fengchengwen, liuyonglong, lihuisong

The event callback in an application may use the macro RTE_ETH_FOREACH_DEV
to iterate over all enabled ports to do something (like verifying the port
id validity) when receiving a probing event. If the ethdev state of a port
is not RTE_ETH_DEV_UNUSED, this port is considered a valid port.

However, this state is set to RTE_ETH_DEV_ATTACHED after pushing the
probing event. This means the probing callback will skip this port. But
this assignment cannot be moved before the probing notification. See
commit be8cd210379a ("ethdev: fix port probing notification")

So this patch has to add a new state, RTE_ETH_DEV_ALLOCATED. Set the ethdev
state to RTE_ETH_DEV_ALLOCATED before pushing the probing event and set it
to RTE_ETH_DEV_ATTACHED once the device is definitely probed. A port is
then valid if its device state is 'ALLOCATED' or 'ATTACHED'.

In addition, the new state has to be placed behind 'REMOVED' to avoid an
ABI break. Fortunately, this ethdev state is internal and applications
cannot access it directly. So this patch encapsulates an API,
rte_eth_dev_is_used, for the ethdev layer or PMDs to call, eliminating
concerns about comparing this state enum value directly.
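
(Appending the enumerator after 'REMOVED' keeps every existing numeric
value stable; a sketch of the resulting layout, mirroring the diff below:)

enum rte_eth_dev_state {
	RTE_ETH_DEV_UNUSED = 0,  /* value unchanged */
	RTE_ETH_DEV_ATTACHED,    /* stays 1 */
	RTE_ETH_DEV_REMOVED,     /* stays 2 */
	RTE_ETH_DEV_ALLOCATED,   /* appended, takes the new value 3 */
};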

Fixes: be8cd210379a ("ethdev: fix port probing notification")
Cc: stable@dpdk.org

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
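
To make the fixed behaviour concrete, here is a minimal sketch of the
application-side callback described above (the handler body and the
app_configure_port() helper are illustrative assumptions, not part of
this patch):

#include <stdio.h>

#include <rte_common.h>
#include <rte_ethdev.h>

/* Illustrative application helper, stubbed for the example. */
static void
app_configure_port(uint16_t port_id)
{
	printf("configuring port %u\n", port_id);
}

/* NEW-event callback: before this fix, the port being probed was still
 * RTE_ETH_DEV_UNUSED while the callback ran, so RTE_ETH_FOREACH_DEV
 * skipped it; with RTE_ETH_DEV_ALLOCATED set before the event is
 * pushed, the new port is visited as expected. */
static int
app_event_cb(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	uint16_t pid;

	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (event != RTE_ETH_EVENT_NEW)
		return 0;

	RTE_ETH_FOREACH_DEV(pid) {
		if (pid == port_id)
			app_configure_port(pid);
	}

	return 0;
}

Such a callback would typically be registered once, before any port is
probed, with rte_eth_dev_callback_register(RTE_ETH_ALL,
RTE_ETH_EVENT_NEW, app_event_cb, NULL).
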
---
 drivers/net/bnxt/bnxt_ethdev.c   |  2 +-
 drivers/net/cpfl/cpfl_ethdev.h   |  2 +-
 drivers/net/ionic/ionic_ethdev.c |  2 +-
 drivers/net/mlx5/mlx5.c          |  2 +-
 drivers/net/nfp/nfp_ethdev.c     |  4 ++--
 lib/ethdev/ethdev_driver.c       | 13 ++++++++++---
 lib/ethdev/ethdev_driver.h       | 12 ++++++++++++
 lib/ethdev/ethdev_pci.h          |  2 +-
 lib/ethdev/rte_class_eth.c       |  2 +-
 lib/ethdev/rte_ethdev.c          |  4 ++--
 lib/ethdev/rte_ethdev.h          |  4 +++-
 lib/ethdev/version.map           |  1 +
 12 files changed, 36 insertions(+), 14 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index ef8a928c91..1441194b85 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -6706,7 +6706,7 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
 
 	PMD_DRV_LOG_LINE(DEBUG, "Calling Device uninit");
 
-	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
+	if (rte_eth_dev_is_used(eth_dev->state))
 		bnxt_dev_close_op(eth_dev);
 
 	return 0;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 9a38a69194..aad05aafd6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -328,7 +328,7 @@ cpfl_get_itf_by_port_id(uint16_t port_id)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	if (dev->state == RTE_ETH_DEV_UNUSED) {
+	if (!rte_eth_dev_is_used(dev->state)) {
 		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
 		return NULL;
 	}
diff --git a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c
index aa22b6a70d..2a4e565c4f 100644
--- a/drivers/net/ionic/ionic_ethdev.c
+++ b/drivers/net/ionic/ionic_ethdev.c
@@ -1109,7 +1109,7 @@ eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
-	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
+	if (rte_eth_dev_is_used(eth_dev->state))
 		ionic_dev_close(eth_dev);
 
 	eth_dev->dev_ops = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 6e4473e2f4..642e762868 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -3376,7 +3376,7 @@ mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
 	while (port_id < RTE_MAX_ETHPORTS) {
 		struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 
-		if (dev->state != RTE_ETH_DEV_UNUSED &&
+		if (rte_eth_dev_is_used(dev->state) &&
 		    dev->device &&
 		    (dev->device == odev ||
 		     (dev->device->driver &&
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index df5482f74a..dae4594e56 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -754,11 +754,11 @@ nfp_net_close(struct rte_eth_dev *dev)
 	/*
 	 * In secondary process, a released eth device can be found by its name
 	 * in shared memory.
-	 * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the
+	 * If the state of the eth device is not in use, it means the
 	 * eth device has been released.
 	 */
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (dev->state == RTE_ETH_DEV_UNUSED)
+		if (!rte_eth_dev_is_used(dev->state))
 			return 0;
 
 		nfp_pf_secondary_uninit(hw_priv);
diff --git a/lib/ethdev/ethdev_driver.c b/lib/ethdev/ethdev_driver.c
index 9afef06431..5537c2f7af 100644
--- a/lib/ethdev/ethdev_driver.c
+++ b/lib/ethdev/ethdev_driver.c
@@ -55,8 +55,8 @@ eth_dev_find_free_port(void)
 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
 		/* Using shared name field to find a free port. */
 		if (eth_dev_shared_data->data[i].name[0] == '\0') {
-			RTE_ASSERT(rte_eth_devices[i].state ==
-				   RTE_ETH_DEV_UNUSED);
+			RTE_ASSERT(!rte_eth_dev_is_used(
+					rte_eth_devices[i].state));
 			return i;
 		}
 	}
@@ -221,11 +221,18 @@ rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
 		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);
 
+	dev->state = RTE_ETH_DEV_ALLOCATED;
 	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
 
 	dev->state = RTE_ETH_DEV_ATTACHED;
 }
 
+bool rte_eth_dev_is_used(uint16_t dev_state)
+{
+	return dev_state == RTE_ETH_DEV_ALLOCATED ||
+		dev_state == RTE_ETH_DEV_ATTACHED;
+}
+
 int
 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
 {
@@ -243,7 +250,7 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
 	if (ret != 0)
 		return ret;
 
-	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
+	if (rte_eth_dev_is_used(eth_dev->state))
 		rte_eth_dev_callback_process(eth_dev,
 				RTE_ETH_EVENT_DESTROY, NULL);
 
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 1fd4562b40..dc496daf05 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -1754,6 +1754,18 @@ int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
 __rte_internal
 void rte_eth_dev_probing_finish(struct rte_eth_dev *dev);
 
+/**
+ * Check if an Ethernet device state is used or not
+ *
+ * @param dev_state
+ *   The state of the Ethernet device
+ * @return
+ *   - true if the state of the Ethernet device is allocated or attached
+ *   - false if this state is neither allocated nor attached
+ */
+__rte_internal
+bool rte_eth_dev_is_used(uint16_t dev_state);
+
 /**
  * Create memzone for HW rings.
  * malloc can't be used as the physical address is needed.
diff --git a/lib/ethdev/ethdev_pci.h b/lib/ethdev/ethdev_pci.h
index 2229ffa252..1e62f30d8d 100644
--- a/lib/ethdev/ethdev_pci.h
+++ b/lib/ethdev/ethdev_pci.h
@@ -179,7 +179,7 @@ rte_eth_dev_pci_generic_remove(struct rte_pci_device *pci_dev,
 	 * eth device has been released.
 	 */
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
-	    eth_dev->state == RTE_ETH_DEV_UNUSED)
+	    !rte_eth_dev_is_used(eth_dev->state))
 		return 0;
 
 	if (dev_uninit) {
diff --git a/lib/ethdev/rte_class_eth.c b/lib/ethdev/rte_class_eth.c
index a8d01e2595..f343c4b6eb 100644
--- a/lib/ethdev/rte_class_eth.c
+++ b/lib/ethdev/rte_class_eth.c
@@ -120,7 +120,7 @@ eth_dev_match(const struct rte_eth_dev *edev,
 	const struct rte_kvargs *kvlist = arg->kvlist;
 	unsigned int pair;
 
-	if (edev->state == RTE_ETH_DEV_UNUSED)
+	if (!rte_eth_dev_is_used(edev->state))
 		return -1;
 	if (arg->device != NULL && arg->device != edev->device)
 		return -1;
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 6413c54e3b..3d7a3c39d3 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -349,7 +349,7 @@ uint16_t
 rte_eth_find_next(uint16_t port_id)
 {
 	while (port_id < RTE_MAX_ETHPORTS &&
-			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
+	       !rte_eth_dev_is_used(rte_eth_devices[port_id].state))
 		port_id++;
 
 	if (port_id >= RTE_MAX_ETHPORTS)
@@ -408,7 +408,7 @@ rte_eth_dev_is_valid_port(uint16_t port_id)
 	int is_valid;
 
 	if (port_id >= RTE_MAX_ETHPORTS ||
-	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
+	    !rte_eth_dev_is_used(rte_eth_devices[port_id].state))
 		is_valid = 0;
 	else
 		is_valid = 1;
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 1f71cad244..f9a72b9883 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -2091,10 +2091,12 @@ typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
 enum rte_eth_dev_state {
 	/** Device is unused before being probed. */
 	RTE_ETH_DEV_UNUSED = 0,
-	/** Device is attached when allocated in probing. */
+	/** Device is attached when definitely probed. */
 	RTE_ETH_DEV_ATTACHED,
 	/** Device is in removed state when plug-out is detected. */
 	RTE_ETH_DEV_REMOVED,
+	/** Device is allocated and is set before reporting the new event. */
+	RTE_ETH_DEV_ALLOCATED,
 };
 
 struct rte_eth_dev_sriov {
diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
index 12f48c70a0..45b982e98d 100644
--- a/lib/ethdev/version.map
+++ b/lib/ethdev/version.map
@@ -351,6 +351,7 @@ INTERNAL {
 	rte_eth_dev_get_by_name;
 	rte_eth_dev_is_rx_hairpin_queue;
 	rte_eth_dev_is_tx_hairpin_queue;
+	rte_eth_dev_is_used;
 	rte_eth_dev_probing_finish;
 	rte_eth_dev_release_port;
 	rte_eth_dev_internal_reset;
-- 
2.22.0


^ permalink raw reply	[relevance 2%]

* Re: [PATCH v2] ring: add the second version of the RTS interface
  2025-01-08  1:41  3%   ` Huichao Cai
@ 2025-01-14 15:04  0%     ` Thomas Monjalon
  0 siblings, 0 replies; 169+ results
From: Thomas Monjalon @ 2025-01-14 15:04 UTC (permalink / raw)
  To: Huichao Cai; +Cc: dev, honnappa.nagarahalli, konstantin.v.ananyev

08/01/2025 02:41, Huichao Cai:
> Hi, Thomas
>     This patch adds a field to the ABI structure. I have added the suppress_type
> field in the file libabigail.abignore, but "ci/github-robot: Build" still reported
> an error. Could you please advise on how to fill in the suppress_type field?

You must check locally and see what happens when you add some suppressions.

You will find documentation here:
https://sourceware.org/libabigail/manual/libabigail-concepts.html#suppression-specifications
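
For example, a minimal entry could look like this (the type name below
is only a guess from this patch's context and must be adapted to the
structure that actually changed):

; Hypothetical libabigail.abignore entry: tolerate a field appended
; at the end of the RTS head/tail structure.
[suppress_type]
	name = rte_ring_rts_headtail
	has_data_member_inserted_at = end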



^ permalink raw reply	[relevance 0%]

Results 13601-13769 of 13769
-- links below jump to the message on this page --
2020-08-14 17:34     [dpdk-dev] [PATCH] eal: add option to put timestamp on console output Stephen Hemminger
2024-10-16 20:20     ` [PATCH v26 00/15] Log subsystem improvements Stephen Hemminger
2024-10-16 20:20  4%   ` [PATCH v26 15/15] doc: add release note about log library Stephen Hemminger
2024-10-24  3:18     ` [PATCH v27 00/14] Log subsystem changes Stephen Hemminger
2024-10-24  3:18  4%   ` [PATCH v27 14/14] doc: add release note about log library Stephen Hemminger
2024-10-24 19:02     ` [PATCH v28 00/13] Logging subsystem improvements Stephen Hemminger
2024-10-24 19:02  4%   ` [PATCH v28 13/13] doc: add release note about log library Stephen Hemminger
2024-10-25 21:45     ` [PATCH v29 00/13] Logging subsystem enhancements Stephen Hemminger
2024-10-25 21:45  4%   ` [PATCH v29 13/13] doc: add release note about log library Stephen Hemminger
2024-10-27 17:24     ` [PATCH v30 00/13] Log library enhancements Stephen Hemminger
2024-10-27 17:24  4%   ` [PATCH v30 13/13] doc: add release note about log library Stephen Hemminger
2023-07-05  8:48     [PATCH] eventdev: announce single-event enqueue/dequeue ABI change Mattias Rönnblom
2023-07-05 11:12     ` [PATCH v2] doc: " Mattias Rönnblom
2024-10-11 14:42       ` David Marchand
2024-10-13  6:57     ` Mattias Rönnblom
2024-10-14  7:18       ` Jerin Jacob
2024-10-14 14:40  4%     ` Thomas Monjalon
2024-10-14 14:44  4%     ` David Marchand
2024-03-20 10:55     [PATCH 0/2] introduce PM QoS interface Huisong Li
2024-09-12  2:38     ` [PATCH v10 0/2] power: " Huisong Li
2024-10-14 15:27  0%   ` Stephen Hemminger
2024-10-15  9:30  0%     ` lihuisong (C)
2024-10-21 11:42  4% ` [PATCH v11 " Huisong Li
2024-10-21 11:42  5%   ` [PATCH v11 1/2] power: introduce PM QoS API on CPU wide Huisong Li
2024-10-22  9:08  0%     ` Konstantin Ananyev
2024-10-22  9:41  0%       ` lihuisong (C)
2024-10-23  4:09  4% ` [PATCH v12 0/3] power: introduce PM QoS interface Huisong Li
2024-10-23  4:09  5%   ` [PATCH v12 1/3] power: introduce PM QoS API on CPU wide Huisong Li
2024-10-25  9:18  4% ` [PATCH v13 0/3] power: introduce PM QoS interface Huisong Li
2024-10-25  9:18  5%   ` [PATCH v13 1/3] power: introduce PM QoS API on CPU wide Huisong Li
2024-10-25 12:08  0%     ` Tummala, Sivaprasad
2024-10-29 13:28  4% ` [PATCH v14 0/3] power: introduce PM QoS interface Huisong Li
2024-10-29 13:28  5%   ` [PATCH v14 1/3] power: introduce PM QoS API on CPU wide Huisong Li
2024-11-04  9:13  0%   ` [PATCH v14 0/3] power: introduce PM QoS interface lihuisong (C)
2024-11-11  2:25  4% ` [PATCH v15 " Huisong Li
2024-11-11  2:25  5%   ` [PATCH v15 1/3] power: introduce PM QoS API on CPU wide Huisong Li
2024-11-11 10:29  0%   ` [PATCH v15 0/3] power: introduce PM QoS interface Thomas Monjalon
2024-11-11  9:14  4% ` [RESEND PATCH " Huisong Li
2024-11-11  9:14  5%   ` [RESEND PATCH v15 1/3] power: introduce PM QoS API on CPU wide Huisong Li
2024-03-20 21:05     [PATCH 00/15] fix packing of structs when building with MSVC Tyler Retzlaff
2024-12-31 18:37     ` [PATCH v8 00/29] " Andre Muezerie
2024-12-31 18:38       ` [PATCH v8 27/29] lib/net: replace packed attributes Andre Muezerie
2025-01-08 12:01  3%     ` David Marchand
2025-01-09  2:49  0%       ` Andre Muezerie
2025-01-09  2:45     ` [PATCH v10 00/30] fix packing of structs when building with MSVC Andre Muezerie
2025-01-09  2:46  1%   ` [PATCH v10 27/30] lib/net: replace packed attributes Andre Muezerie
2025-01-10 22:16     ` [PATCH v11 00/30] fix packing of structs when building with MSVC Andre Muezerie
2025-01-10 22:16  1%   ` [PATCH v11 27/30] net: replace packed attributes Andre Muezerie
2024-07-15 22:11     [RFC v2] ethdev: an API for cache stashing hints Wathsala Vithanage
2024-10-21  1:52     ` [RFC v3 0/2] An API for Stashing Packets into CPU caches Wathsala Vithanage
2024-10-21  1:52       ` [RFC v3 2/2] ethdev: introduce the cache stashing hints API Wathsala Vithanage
2024-12-03 21:13  3%     ` Stephen Hemminger
2024-12-05 15:40  3%       ` David Marchand
2024-12-05 21:00  0%         ` Stephen Hemminger
2024-07-20 16:50     [PATCH v1 0/4] power: refactor power management library Sivaprasad Tummala
2024-08-26 13:06     ` [PATCH v2 " Sivaprasad Tummala
2024-08-26 13:06       ` [PATCH v2 2/4] power: refactor uncore " Sivaprasad Tummala
2024-08-27 13:02         ` lihuisong (C)
2024-10-08  6:19           ` Tummala, Sivaprasad
2024-10-22  2:05  0%         ` lihuisong (C)
2024-08-21 16:25     [PATCH dpdk v1 00/15] IPv6 APIs overhaul Robin Jarry
2024-10-18  9:17     ` [PATCH dpdk v4 00/17] " Robin Jarry
2024-10-18  9:17  1%   ` [PATCH dpdk v4 04/17] net: use IPv6 structure for packet headers Robin Jarry
2024-10-18  9:17  1%   ` [PATCH dpdk v4 05/17] lpm6: use IPv6 address structure and utils Robin Jarry
2024-10-18  9:17  2%   ` [PATCH dpdk v4 07/17] rib6: " Robin Jarry
2024-10-18 14:05     ` [PATCH dpdk v5 00/17] IPv6 APIs overhaul Robin Jarry
2024-10-18 14:05  1%   ` [PATCH dpdk v5 04/17] net: use IPv6 structure for packet headers Robin Jarry
2024-10-18 14:05  1%   ` [PATCH dpdk v5 05/17] lpm6: use IPv6 address structure and utils Robin Jarry
2024-10-18 14:05  2%   ` [PATCH dpdk v5 07/17] rib6: " Robin Jarry
2024-09-05 10:14     [PATCH] [RFC] cryptodev: replace LIST_END enumerators with APIs Akhil Goyal
2024-10-04  3:54     ` Ferruh Yigit
2024-10-04  7:04       ` David Marchand
2024-10-10  0:49         ` Ferruh Yigit
2024-10-10  6:18           ` [EXTERNAL] " Akhil Goyal
2024-10-28 11:15  3%         ` Dodji Seketeli
2024-10-04  9:38       ` Dodji Seketeli
2024-10-04 17:45         ` [EXTERNAL] " Akhil Goyal
2024-10-28 10:55  4%       ` Dodji Seketeli
2024-10-10  0:35         ` Ferruh Yigit
2024-10-28 10:12  4%       ` Dodji Seketeli
2024-09-07  7:31     [RFC PATCH 0/3] add feature arc in rte_graph Nitin Saxena
2024-10-08  8:04     ` David Marchand
2024-10-14 11:11       ` [EXTERNAL] " Nitin Saxena
2024-10-16  9:24  3%     ` David Marchand
2024-10-16  9:38  0%       ` Robin Jarry
2024-10-16 13:50  0%         ` Nitin Saxena
2024-10-17  7:03  0%           ` Nitin Saxena
2024-10-01 18:11     [PATCH v2 0/3] net: add thread-safe crc api Arkadiusz Kusztal
2024-10-01 18:11     ` [PATCH v2 1/3] " Arkadiusz Kusztal
2024-12-02 22:36  3%   ` Stephen Hemminger
2024-10-10 13:31     [PATCH v4 0/5] add feature arc in rte_graph Nitin Saxena
2024-10-14 14:33     ` [PATCH v5 " Nitin Saxena
2024-10-14 14:33  4%   ` [PATCH v5 2/5] graph: add feature arc option in graph create Nitin Saxena
2024-10-11  1:38     [PATCH] doc: correct definition of Stats per queue feature Stephen Hemminger
2024-10-11 19:25     ` Ferruh Yigit
2024-11-26 23:39  0%   ` Thomas Monjalon
2024-10-11  9:49     [PATCH v14 0/4] add support for self monitoring Tomasz Duszynski
2024-10-25  8:54     ` [PATCH v15 " Tomasz Duszynski
2024-10-25  8:54       ` [PATCH v15 4/4] eal: add PMU support to tracing library Tomasz Duszynski
2024-11-12 23:09  3%     ` Stephen Hemminger
2024-11-15 10:24  0%       ` [EXTERNAL] " Tomasz Duszynski
2024-11-18  7:37       ` [PATCH v16 0/4] add support for self monitoring Tomasz Duszynski
2024-11-18  7:37         ` [PATCH v16 1/4] lib: add generic support for reading PMU events Tomasz Duszynski
2024-12-06 18:15  3%       ` Konstantin Ananyev
2025-01-07  7:45  0%         ` Tomasz Duszynski
2024-10-14 11:58     [PATCH v5 0/3] Introduce node-specific errors in graph library pbhagavatula
2024-10-14 16:10     ` [PATCH v6 0/3] Introduce node-specific xstats " pbhagavatula
2024-10-14 16:10  3%   ` [PATCH v6 1/3] graph: add support for node specific xstats pbhagavatula
2024-10-15  5:42       ` [PATCH v7 0/3] Introduce node-specific xstats in graph library pbhagavatula
2024-10-15  5:42  3%     ` [PATCH v7 1/3] graph: add support for node specific xstats pbhagavatula
2024-10-15  2:49     [PATCH v4 0/5] power: refactor power management library Sivaprasad Tummala
2024-10-17 10:26     ` [PATCH v5 " Sivaprasad Tummala
2024-10-17 16:17  3%   ` Stephen Hemminger
2024-10-15  8:49  3% [RFC 00/10] eventdev: remove single-event enqueue and dequeue Mattias Rönnblom
2024-10-15  8:49  8% ` [RFC 10/10] eventdev: remove single event " Mattias Rönnblom
2024-10-15 17:07  0% ` [RFC 00/10] eventdev: remove single-event " Stephen Hemminger
2024-10-15 18:38  0%   ` Mattias Rönnblom
2024-10-15  8:49     [RFC 01/10] event/dsw: remove single event " Mattias Rönnblom
2024-10-15 18:25  3% ` [RFC v2 00/10] eventdev: remove single-event " Mattias Rönnblom
2024-10-15 18:25  7%   ` [RFC v2 10/10] eventdev: remove single event " Mattias Rönnblom
2024-10-15 22:00  0%     ` Stephen Hemminger
2024-10-16  4:36  0%       ` Mattias Rönnblom
2024-10-16  6:20  0%         ` Mattias Rönnblom
2024-10-16 14:14  3%     ` Jerin Jacob
2024-10-15 12:10     [PATCH 0/3] Enhance headers check David Marchand
2024-10-15 12:10  3% ` [PATCH 1/3] bitops: fix build for GCC without experimental API David Marchand
2024-10-15 12:47  0%   ` Morten Brørup
2024-10-16 11:38     ` [PATCH v2 0/4] Enhance headers check David Marchand
2024-10-16 11:38  3%   ` [PATCH v2 1/4] bitops: fix build for GCC without experimental API David Marchand
2024-10-15 16:54  2% Invitation: Adding support for PCIe steering tags in DPDK Data Plane Development Kit - Meetings
2024-10-15 16:54  2% Updated " Data Plane Development Kit - Meetings
2024-10-15 18:25     [RFC v2 01/10] event/dsw: remove single event enqueue and dequeue Mattias Rönnblom
2024-10-17  6:38  3% ` [RFC v3 00/10] eventdev: remove single-event " Mattias Rönnblom
2024-10-17  6:38 11%   ` [RFC v3 10/10] eventdev: remove single event " Mattias Rönnblom
2024-10-21  7:25  0%   ` [RFC v3 00/10] eventdev: remove single-event " Jerin Jacob
2024-10-21  8:38  0%     ` Mattias Rönnblom
2024-10-21  8:51  3%   ` [PATCH " Mattias Rönnblom
2024-10-21  8:51 11%     ` [PATCH 10/10] eventdev: remove single event " Mattias Rönnblom
2024-10-21  9:21  0%     ` [PATCH 00/10] eventdev: remove single-event " Mattias Rönnblom
2024-10-21  9:06  3%   ` Mattias Rönnblom
2024-10-21  9:06 11%     ` [PATCH 10/10] eventdev: remove single event " Mattias Rönnblom
2024-10-17  6:15     [PATCH 0/6] Adjust format and level of log Chaoyong He
2024-10-17  6:15  3% ` [PATCH 5/6] net/nfp: reformat the period of logs Chaoyong He
2024-10-17 15:22  2% Updated Invitation: Adding support for PCIe steering tags in DPDK Data Plane Development Kit - Meetings
2024-10-17 19:56  3% DPDK - PCIe Steering Tags Meeting on 10/23/24 Wathsala Wathawana Vithanage
2024-10-21  2:05  0% ` Wathsala Wathawana Vithanage
2024-10-17 21:32  3% Community Call for Adding Support of PCIe Steering Tags Support in DPDK Wathsala Wathawana Vithanage
2024-10-17 22:58     [PATCH 0/2] gpudev: annotate memory allocation Stephen Hemminger
2024-11-09  0:22  3% ` Stephen Hemminger
2024-10-18 21:47  4% release candidate 24.11-rc1 Thomas Monjalon
2024-10-29 10:19  0% ` Xu, HailinX
2024-10-29 19:31  0% ` Thinh Tran
2024-10-20  9:22     [PATCH v6 0/5] power: refactor power management library Sivaprasad Tummala
2024-10-21  4:07     ` [PATCH v7 " Sivaprasad Tummala
2024-10-21  4:07       ` [PATCH v7 1/5] power: refactor core " Sivaprasad Tummala
2024-10-22  3:03  3%     ` lihuisong (C)
2024-10-22  7:13  0%       ` Tummala, Sivaprasad
2024-10-22  8:36  0%         ` lihuisong (C)
2024-10-22 19:05  3% [PATCH v6 0/3] add ec points to sm2 op Arkadiusz Kusztal
2024-10-22 19:05  5% ` [PATCH v6 1/3] cryptodev: " Arkadiusz Kusztal
2024-10-23  1:19  0% ` [PATCH v6 0/3] " Stephen Hemminger
2024-10-23  8:19  3% [PATCH v7 " Arkadiusz Kusztal
2024-10-23  8:19  5% ` [PATCH v7 1/3] cryptodev: " Arkadiusz Kusztal
2024-10-28  9:18     [PATCH V2 7/7] mlx5: add backward compatibility for RDMA monitor Minggang Li(Gavin)
2024-10-29 13:42     ` [PATCH V3 0/7] port probe time optimization Minggang Li(Gavin)
2024-10-29 13:42       ` [PATCH V3 7/7] mlx5: add backward compatibility for RDMA monitor Minggang Li(Gavin)
2024-10-29 16:26  3%     ` Stephen Hemminger
2024-10-30  8:25  0%       ` Minggang(Gavin) Li
2024-10-30  8:19     [PATCH 0/3] NFP PMD enhancement Chaoyong He
2024-10-30  8:19  6% ` [PATCH 3/3] net/nfp: add support for port identify Chaoyong He
2024-10-30  8:27     ` [PATCH 0/4] NFP PMD enhancement Chaoyong He
2024-10-30  8:27  6%   ` [PATCH 4/4] net/nfp: add support for port identify Chaoyong He
2024-11-01  2:57       ` [PATCH v3 0/4] NFP PMD enhancement Chaoyong He
2024-11-01  2:57  6%     ` [PATCH v3 4/4] net/nfp: add support for port identify Chaoyong He
2024-11-04  1:34         ` [PATCH v4 0/4] NFP PMD enhancement Chaoyong He
2024-11-04  1:34  6%       ` [PATCH v4 4/4] net/nfp: add LED support Chaoyong He
2024-11-04  9:36  3% [PATCH v8 0/3] add ec points to sm2 op Arkadiusz Kusztal
2024-11-04  9:36  5% ` [PATCH v8 1/3] cryptodev: " Arkadiusz Kusztal
2024-11-06 10:08  0%   ` [EXTERNAL] " Akhil Goyal
2024-11-06 15:17  0%     ` Kusztal, ArkadiuszX
2024-11-07  8:04     [PATCH] graph: optimize graph search when scheduling nodes Huichao cai
2024-11-07  9:37  3% ` [EXTERNAL] " Jerin Jacob
2024-11-08  1:39  4%   ` Huichao Cai
2024-11-08 12:22  3%     ` Jerin Jacob
2024-11-08 13:38  0%       ` David Marchand
2024-11-11  5:38  0%         ` Jerin Jacob
2024-11-12  8:51  0%           ` David Marchand
2024-11-12  9:35  3%             ` Jerin Jacob
2024-11-12 12:57  0%               ` Huichao Cai
2024-11-11  4:03     ` [PATCH v2] graph: mcore: optimize graph search Huichao Cai
2024-11-11  5:46  3%   ` [EXTERNAL] " Jerin Jacob
2024-11-13  7:35  5%   ` [PATCH v3 1/2] " Huichao Cai
2024-11-13  7:35  5%     ` [PATCH v3 2/2] graph: add alignment to the member of rte_node Huichao Cai
2024-11-14  7:14  0%       ` [EXTERNAL] " Jerin Jacob
2024-11-14  8:45  5%     ` [PATCH v4 1/2] graph: mcore: optimize graph search Huichao Cai
2024-11-14  8:45  5%       ` [PATCH v4 2/2] graph: add alignment to the member of rte_node Huichao Cai
2024-11-14 10:05  0%         ` [EXTERNAL] " Jerin Jacob
2024-11-15  1:55  5%         ` [PATCH v5 1/1] graph: improve node layout Huichao Cai
2024-11-15 14:23  0%           ` Thomas Monjalon
2024-11-15 15:57  0%             ` [EXTERNAL] " Jerin Jacob
2024-12-13  2:21 10%       ` [PATCH v5] graph: mcore: optimize graph search Huichao Cai
2024-12-13 14:36  3%         ` David Marchand
2024-12-16  1:43 11%         ` [PATCH v6] " Huichao Cai
2024-12-16 14:49  4%           ` David Marchand
2024-12-17  9:04  0%             ` David Marchand
2024-11-08 18:17     [PATCH] config: limit lcore variable maximum size to 4k David Marchand
2024-11-08 18:35     ` Morten Brørup
2024-11-08 19:53       ` Morten Brørup
2024-11-08 22:13         ` Thomas Monjalon
2024-11-08 22:49  3%       ` Morten Brørup
2024-11-11 12:52  5% [PATCH] power: fix a typo in the PM QoS guide Huisong Li
2024-11-12  8:35  5% ` [PATCH v2] " Huisong Li
2024-11-13  0:59  5% ` [PATCH v3] " Huisong Li
2024-11-12  9:31     rte_fib network order bug Robin Jarry
2024-11-13 10:42     ` Medvedkin, Vladimir
2024-11-13 13:27       ` Robin Jarry
2024-11-13 19:39         ` Medvedkin, Vladimir
2024-11-14  7:43           ` Morten Brørup
2024-11-14 10:18             ` Robin Jarry
2024-11-14 14:35               ` Morten Brørup
2024-11-15 13:01                 ` Robin Jarry
2024-11-15 13:52  3%               ` Morten Brørup
2024-11-15 14:28  3%                 ` Robin Jarry
2024-11-15 16:20  0%                   ` Stephen Hemminger
2024-11-17 15:04  3%                     ` Vladimir Medvedkin
2024-11-20 22:24  3% Tech Board Meeting Minutes - 2024-Nov-13 Honnappa Nagarahalli
2024-11-26 13:14  3% [PATCH v1 0/4] Adjust wording for NUMA vs. socket ID in DPDK Anatoly Burakov
2024-11-27 10:03     rte_event_eth_tx_adapter_enqueue() short enqueue Mattias Rönnblom
2024-11-27 10:38     ` Bruce Richardson
2024-11-27 10:53       ` Mattias Rönnblom
2024-11-27 11:07         ` Bruce Richardson
2024-12-19 15:59           ` Morten Brørup
2024-12-19 17:12  3%         ` Bruce Richardson
2024-11-28 17:07  4% [PATCH v1] doc: update release notes for 24.11 John McNamara
2024-11-30 23:50  4% DPDK 24.11 released Thomas Monjalon
2024-12-03  7:54 11% [PATCH] version: 25.03-rc0 David Marchand
2024-12-04 10:06  3% ` Thomas Monjalon
2024-12-04 12:05  3%   ` David Marchand
2024-12-05 17:57     [PATCH 0/3] Defer lcore variables allocation David Marchand
2024-12-06 11:01     ` Mattias Rönnblom
2024-12-09 11:03       ` David Marchand
2024-12-09 15:39         ` Mattias Rönnblom
2024-12-09 17:40  3%       ` David Marchand
2024-12-10  9:41  0%         ` Mattias Rönnblom
     [not found]     <20220825024425.10534-1-lihuisong@huawei.com>
2024-09-29  5:52     ` [PATCH RESEND v7 0/5] app/testpmd: support multiple process attach and detach port Huisong Li
2024-09-29  5:52       ` [PATCH RESEND v7 2/5] ethdev: fix skip valid port in probing callback Huisong Li
2024-12-10  1:50  0%     ` lihuisong (C)
2025-01-10  3:21  0%       ` lihuisong (C)
2025-01-10 17:54  3%         ` Stephen Hemminger
2025-01-13  2:32  0%           ` lihuisong (C)
2024-10-08  2:32       ` [PATCH RESEND v7 0/5] app/testpmd: support multiple process attach and detach port lihuisong (C)
2024-10-18  1:04  0%     ` Ferruh Yigit
2024-10-18  2:48  0%       ` lihuisong (C)
2024-10-26  4:11  0%         ` lihuisong (C)
2024-10-29 22:12  0%         ` Ferruh Yigit
2024-10-30  4:06  0%           ` lihuisong (C)
2024-12-16  4:14  4% DTS WG Meeting Minutes - December 5, 2024 Patrick Robb
2024-12-16  4:18  3% Community CI Meeting Minutes - December 12, 2024 Patrick Robb
2024-12-24  7:36     [v1 00/16] crypto/virtio: vDPA and asymmetric support Gowrishankar Muthukrishnan
2024-12-24  7:37  1% ` [v1 15/16] crypto/virtio: add vhost backend to virtio_user Gowrishankar Muthukrishnan
2025-01-05  9:57  5% [PATCH] ring: add the second version of the RTS interface Huichao Cai
2025-01-05 15:13  5% ` [PATCH v2] " Huichao Cai
2025-01-08  1:41  3%   ` Huichao Cai
2025-01-14 15:04  0%     ` Thomas Monjalon
2025-01-05 15:09  5% Huichao Cai
2025-01-06 16:45     [PATCH 0/2] compile ipsec on Windows Andre Muezerie
2025-01-06 16:45     ` [PATCH 1/2] lib/ipsec: " Andre Muezerie
2025-01-09 15:31  3%   ` Konstantin Ananyev
2025-01-07 18:44     [v2 0/4] crypto/virtio: add vDPA backend support Gowrishankar Muthukrishnan
2025-01-07 18:44  1% ` [v2 3/4] crypto/virtio: add vhost backend to virtio_user Gowrishankar Muthukrishnan
2025-01-13  2:55     [PATCH v1 0/2] ethdev: fix skip valid port in probing callback Huisong Li
2025-01-13  2:55  2% ` [PATCH v1 2/2] " Huisong Li
