* [RFC PATCH v1 01/23] dts: merge DTS framework/plotgraph.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 02/23] dts: merge DTS framework/plotting.py " Juraj Linkeš
` (21 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/plotgraph.py | 842 +++++++++++++++++++++++++++++++++++++
1 file changed, 842 insertions(+)
create mode 100644 dts/framework/plotgraph.py
diff --git a/dts/framework/plotgraph.py b/dts/framework/plotgraph.py
new file mode 100644
index 0000000000..13b0203400
--- /dev/null
+++ b/dts/framework/plotgraph.py
@@ -0,0 +1,842 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import math
+
+import matplotlib as mp
+
+mp.use("Agg")
+import itertools
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+"""
+Generate graphs for each test suite
+
+TODO add 3d mesh graph interface
+"""
+# gap between the first bar graph and the x axis
+distanceFromXAxis = 0.2
+colors = itertools.cycle(
+ ["b", "g", "c", "#008000", "#008FF0", "#0080FF", "#008080", "#808000"]
+)
+
+colors = itertools.cycle(
+ [
+ "#f70202",
+ "#0f0b0b",
+ "#123eed",
+ "#07601b",
+ "#36f760",
+ "#87210d",
+ "#512f28",
+ "#11c6b1",
+ "#45f94e",
+ "#f94566",
+ ]
+)
+
+barcolors = itertools.cycle(
+ [
+ "#f70202",
+ "#0f0b0b",
+ "#123eed",
+ "#07601b",
+ "#36f760",
+ "#87210d",
+ "#512f28",
+ "#11c6b1",
+ "#45f94e",
+ "#f94566",
+ ]
+)
+
+expColors = itertools.cycle(["r", "m", "y"])
+graphNum = 0
+
+
class ovGroup:
    """Plain data holder for one overlay group shown in a bar-graph
    description box: the group number, its pipe count, and the input
    data for traffic classes 0 and 3.

    Callers assign the attributes directly after construction; all
    counters start at zero.
    """

    def __init__(self):
        self.number = self.numPipes = 0
        self.tc0inputData = self.tc3inputData = 0
+
+
class graphit2d(object):
    """Data bag describing a single 2D graph (line plot or bar chart).

    Plot2DGraph keeps one instance per plot and reads the attributes
    directly while rendering.  String attributes default to "", list
    attributes to fresh empty lists, barWidth to 0.35 and isLineRate
    to 0.
    """

    def __init__(self):
        # Textual settings: graph kind, title, axis labels, bar x-title.
        for text_attr in ("graphType", "title", "xLabel", "yLabel", "barGraphXTitle"):
            setattr(self, text_attr, "")
        # Per-instance containers: tick labels, measured series (xs/ys),
        # expected series, and bar-chart names/legends/data columns.
        for list_attr in (
            "xticks",
            "yticks",
            "xs",
            "ys",
            "expectedXs",
            "expectedYs",
            "barNames",
            "barLegends",
            "barData",
        ):
            setattr(self, list_attr, [])
        self.barWidth = 0.35
        self.isLineRate = 0
+
+
class Plot2DGraph:
    """Render a collection of graphit2d objects to an image file.

    Holds a list of per-plot data objects (``self.graphs``) plus
    figure-wide settings: primary/secondary axis ticks, title font and
    placement, an optional horizontal marker line, and colour/marker/
    line-style overrides.  ``generatePlot()`` renders line plots and
    ``generateBar()`` renders bar charts; both write the result with
    ``plt.savefig``.

    NOTE(review): relies on matplotlib's implicit pyplot state (plt.*),
    so instances are not safe to use concurrently.
    """

    def __init__(self):
        # Figure layout / bookkeeping.
        self.numSubPlots = 0
        self.plotName = ""
        self.graphs = []          # list of graphit2d, one per plot
        self.fig = []             # becomes a matplotlib Figure once created
        self.graphType = ""
        self.numPlots = 1
        self.hasLegend = True
        self.legendKeys = []

        # Secondary (offset) x/y axis settings.
        self.newXLabel = ""
        self.newXticks = []
        self.newYLabel = ""
        self.newYticks = []
        self.newAxOffset = 60     # outward offset (points) of the extra axis

        # Primary-axis tick overrides.
        self.xticks = []
        self.xticklabels = []
        self.yticks = []
        self.yticklabels = []

        # Optional horizontal reference line and its label box.
        self.alignYmax = False
        self.horizontalLine = False
        self.hLine = 0
        self.hLineBoxX = 3.5
        # NOTE(review): one-element tuple — looks like a stray trailing
        # comma; matplotlib accepts it as a y position, so left as-is.
        self.hLineBoxY = (1.021,)
        self.hLineName = "Expected rate"

        # Axes size and title placement (0 means "use default").
        self.xLen = 0
        self.yLen = 0
        self.titleFontSize = 0
        self.titleXOffset = 0
        self.titleYOffset = 0

        # Mixed bar+line mode and style override lists.
        self.bar_plot_mix = False
        self.lbottomLimit = 1     # bars below this index stack on the left group
        self.colorList = []
        self.lineStyleList = []
        self.markerList = []
        self.barDescriptionBoxTxt = []
        self.barTextBoxTxt = []
        self.setBarOverlay = False
        self.child = None         # external process handle, closed in __del__
        pass

    def __del__(self):
        # Best-effort cleanup of the spawned child process, if any.
        if self.child is not None:
            self.child.close(force=True)
            self.child = None

    #
    # Setup/add data functions
    #
    def resetMe(self):
        """Clear all per-run state so the instance can be reused for a
        new figure (graph data, ticks, style lists, text boxes)."""
        self.horizontalLine = False
        self.alignYmax = False
        self.bar_plot_mix = False
        self.xLen = 0
        self.yLen = 0
        self.titleFontSize = 0
        self.titleXOffset = 0
        self.titleYOffset = 0
        if self.graphs:
            del self.graphs[:]
        if self.yticks:
            del self.yticks[:]
        if self.yticklabels:
            del self.yticklabels[:]
        if self.xticks:
            del self.xticks[:]
        if self.xticklabels:
            del self.xticklabels[:]
        if self.colorList:
            del self.colorList[:]
        if self.lineStyleList:
            del self.lineStyleList[:]
        if self.markerList:
            del self.markerList[:]
        if self.newXticks:
            del self.newXticks[:]
        self.newXLabel = ""
        if self.newYticks:
            del self.newYticks[:]
        self.newYLabel = ""
        if self.barTextBoxTxt:
            del self.barTextBoxTxt[:]
        if self.barDescriptionBoxTxt:
            del self.barDescriptionBoxTxt[:]

    def setPlotName(self, plotname):
        """Set the output plot name."""
        self.plotName = plotname

    def getPlotName(self):
        """Return the output plot name."""
        return self.plotName

    def setNumSubplots(self, numSubPlots=1):
        """
        Set the number of subplots for a new figure.
        delete previously stored graph data
        """
        self.numSubPlots = numSubPlots

    def setGraphType(self, graphtype="plot"):
        """Set the figure-level graph type ("plot" or "bar")."""
        self.graphType = graphtype

    def getGraphType(self):
        """Return the figure-level graph type."""
        return self.graphType

    def getNumSubPlots(self):
        """Return the number of subplots."""
        return self.numSubPlots

    def setNumSubPlots(self):
        # NOTE(review): despite the name this takes no argument and
        # only *returns* the value — apparently a typo'd duplicate of
        # getNumSubPlots(); the real setter is setNumSubplots() above.
        return self.numSubPlots

    def setNumPlots(self, numPlots=1):
        """Set how many plots share the figure."""
        self.numPlots = numPlots

    def setColors(self, colorlist):
        """Override the per-plot colour cycle with an explicit list."""
        self.colorList = colorlist

    def setLineStyle(self, lineStylelist):
        """Override the per-plot line styles with an explicit list."""
        self.lineStyleList = lineStylelist

    def setMarkers(self, markerlist):
        """Override the per-plot markers with an explicit list."""
        self.markerList = markerlist

    def setPutYticksOnFirstAxis(self, yticks, ytickLabels):
        """Set explicit y ticks/labels for the primary axis."""
        self.yticks = yticks
        self.yticklabels = ytickLabels

    def setPutXticksOnFirstAxis(self, xticks, xtickLabels):
        """Set explicit x ticks/labels for the primary axis."""
        self.xticks = xticks
        self.xticklabels = xtickLabels

    def setPutXticksOnSecondAxis(self, xticks, xLabel):
        """Enable a secondary (offset) x axis with the given ticks/label."""
        self.newXLabel = xLabel
        self.newXticks = xticks

    def setPutYticksOnSecondAxis(self, yticks, yLabel, axOffset=60):
        """Enable a secondary (offset) y axis with the given ticks/label."""
        self.newYLabel = yLabel
        self.newYticks = yticks
        self.newAxOffset = axOffset

    def addBarDescriptionBoxTxt(self, plotnum, ovGroup=[], position="bottom left"):
        # NOTE(review): mutable default argument, and `position` is
        # currently unused — accepted for interface compatibility.
        self.barDescriptionBoxTxt.insert(plotnum, ovGroup)

    def setBarTextBoxTxt(self, barText):
        """Set the annotation texts drawn above stacked bar charts."""
        self.barTextBoxTxt = barText

    def setBarWidth(self, plotNum, width):
        """Set the bar width for plot `plotNum`."""
        self.graphs[plotNum].barWidth = width

    def addBarYlabel(self, plotNum, ylabel):
        """Insert a new graphit2d at `plotNum` and set its y label."""
        self.graphs.insert(plotNum, graphit2d())
        # NOTE(review): `currGraph` is assigned but unused.
        currGraph = self.graphs[plotNum]
        self.graphs[plotNum].yLabel = ylabel

    def setBarLegends(self, plotNum, legends):
        """Set the legend entries for bar plot `plotNum`."""
        self.graphs[plotNum].barLegends = legends

    def addBarData(self, plotNum, xlabel, dataArray, barGraphXTitle=""):
        """Append one bar group (name + values) to plot `plotNum`."""
        self.graphs[plotNum].graphType = "bar"
        self.graphs[plotNum].barNames.append(xlabel)
        self.graphs[plotNum].barData.append(dataArray)
        self.graphs[plotNum].barGraphXTitle = barGraphXTitle

    def addPlotData(
        self,
        plotNum,
        xlabel,
        ylabel,
        xticks,
        yticks,
        xData,
        yData,
        xExpData,
        yExpData,
        graphType="plot",
        isLineRate=0,
    ):
        """
        Add graph object if it doesn't exist.

        Inserts a new graphit2d at `plotNum` and fills it with labels,
        ticks, measured series (xData/yData) and optional expected
        series.  Bails out (with a printed error) if xData and yData
        differ in length.
        """
        self.graphs.insert(plotNum, graphit2d())

        currGraph = self.graphs[plotNum]

        currGraph.isLineRate = isLineRate

        if xlabel:
            currGraph.xLabel = xlabel
        if ylabel:
            currGraph.yLabel = ylabel
        if xticks:
            currGraph.xticks = xticks
        if yticks:
            currGraph.yticks = yticks
        if graphType:
            currGraph.graphType = graphType

        # Mismatched series lengths: report and keep the empty graph.
        if len(xData) != len(yData):
            print("Error xData = " + str(len(xData)))
            print("yData = " + str(len(yData)))
            print(xData)
            print(yData)
            return

        currGraph.xs = xData
        currGraph.ys = yData

        if xExpData:
            currGraph.expectedXs = xExpData
        if yExpData:
            currGraph.expectedYs = yExpData

    def oneBar(self, ax, graph, key=[]):
        """Draw one two-series grouped bar chart on axis `ax`.

        Assumes every entry of graph.barData has at least two values
        (series 0 and series 1) — TODO confirm against callers.
        """

        dataSet1 = []
        dataSet2 = []
        width = graph.barWidth

        for data in graph.barData:
            dataSet1.append(data[0])
            dataSet2.append(data[1])

        ind = np.arange(len(dataSet1)) + distanceFromXAxis

        ax.set_xticklabels(graph.barNames)
        rects1 = ax.bar(ind, dataSet1, width, color="#512f28")
        rects2 = ax.bar(ind + width, dataSet2, width, color="#11c6b1")

        if graph.yLabel:
            ax.set_ylabel(graph.yLabel)
        if graph.title:
            ax.set_title(graph.title)

        if graph.barLegends:
            ax.legend((rects1[0], rects2[0]), graph.barLegends)

        # Centre the tick labels between the paired bars.
        ax.set_xticks(ind + width)
        ax.set_xticklabels(graph.barNames)

    def onePlot(self, ax, graph, key=[], lineStyle="-", Marker="x", color=next(colors)):
        # NOTE(review): the `color=next(colors)` default is evaluated
        # once at function-definition time, so every call that omits
        # `color` gets the same colour — likely unintended.
        """Draw one graphit2d (line or bar) on axis `ax`."""

        if graph.xLabel:
            ax.set_xlabel(graph.xLabel)
        if graph.yLabel:
            ax.set_ylabel(graph.yLabel)

        # Per-graph ticks win over figure-level tick overrides.
        if graph.xticks:
            ax.set_xticks(list(range(len(graph.xticks))))
            ax.set_xticklabels(graph.xticks)
        elif self.xticklabels:
            ax.set_xticks(self.xticks)
            ax.set_xticklabels(self.xticklabels)
        if graph.yticks:
            ax.set_yticks(list(range(len(graph.yticks))))
            ax.set_yticklabels(graph.yticks)
        elif self.yticklabels:
            ax.set_yticks(self.yticks)
            ax.set_yticklabels(self.yticklabels)

        if graph.graphType and graph.graphType == "bar":

            ind = np.arange(len(graph.xs)) + distanceFromXAxis
            width = graph.barWidth

            if key is not None:
                ax.bar(ind, graph.ys, width=width, color="b", label=key)
            else:
                ax.bar(ind, graph.ys, width=width, color="b")
            ax.set_xticks(ind + width)
            ax.set_xticklabels(graph.xticks)

        else:
            if key is not None:
                ax.plot(
                    graph.xs,
                    graph.ys,
                    color=color,
                    linestyle=lineStyle,
                    marker=Marker,
                    label=key,
                )
            else:
                ax.plot(
                    graph.xs, graph.ys, color=color, linestyle=lineStyle, marker=Marker
                )

        # deprecated
        if graph.expectedXs:
            print("DEPRECATED")
            return

        # NOTE(review): everything below is unreachable — the branch
        # above returns whenever expectedXs is non-empty, and with an
        # empty expectedXs there is nothing to draw.  Kept for history.
        if graph.graphType and graph.graphType == "bar":
            ax.bar(
                graph.expectedXs,
                graph.expectedYs,
                width=1,
                color=next(expColors),
                label="Exp" + key,
            )

            plt.xticks(graph.xticks, visible=False)
            plt.yticks(graph.yticks)
        else:
            ax.plot(
                graph.expectedXs,
                graph.expectedYs,
                linestyle="--",
                color=next(expColors),
                marker="o",
                label=key,
            )

    def addnewYaxis(self, fig, oldAx):
        """Add a second y axis, offset to the left of `oldAx` by
        self.newAxOffset points, labelled with self.newYticks."""
        newAx = fig.add_axes(oldAx.get_position())
        newAx.patch.set_visible(False)
        # newAx.yaxis.set_visible(False)
        newAx.spines["left"].set_position(("outward", self.newAxOffset))
        newAx.spines["left"].set_color("r")
        newAx.spines["left"].set_facecolor("r")
        newAx.spines["left"].set_edgecolor("r")

        # Reuse the old axis' tick count; trim labels to match.
        newAx.set_yticks(list(range(len(oldAx.get_yticks()))))
        newAx.set_yticklabels(self.newYticks[0 : len(oldAx.get_yticks())])
        newAx.set_ylabel(self.newYLabel, color="b")
        newAx.yaxis.set_visible(True)
        newAx.xaxis.set_visible(False)

    """
    generate graph(s) function
    """

    def multiBarPlots(self, numPlots, keys=[], Title=[], stack=False):
        """Draw `numPlots` bar series on one figure.

        With stack=True the series are stacked (split between a left
        and right stack at self.lbottomLimit); otherwise they are drawn
        side by side.  Optionally overlays line plots (bar_plot_mix),
        a second y axis and a horizontal reference line.
        """
        fig = plt.figure()
        fig.set_size_inches(15, 10)
        fig.suptitle(Title, fontsize=24, y=0.96)
        self.hasLegend = True
        ax1 = fig.add_axes([0.15, 0.1, 0.72, 0.8])
        rects = []
        lines = []
        # Running stack heights for the left/right bar groups.
        lbottoms = [0 for x in range(numPlots)]
        rbottoms = [0 for x in range(numPlots)]
        width = self.graphs[0].barWidth

        for i in range(0, numPlots):
            dataSet = []
            # NOTE(review): double assignment `color = color = ...` is
            # redundant but harmless.
            color = color = next(barcolors)
            if self.colorList:
                color = self.colorList[i]
            if True == stack:
                dataSet = self.graphs[0].barData[i]
                # Single-column data: widen x limits so the bar is visible.
                if 1 == len(self.graphs[0].barData[0]):
                    xmin, xmax = plt.xlim()
                    xmax = (width + distanceFromXAxis) * 2
                    plt.xlim(xmin=xmin, xmax=xmax)

                ind = np.arange(len(dataSet)) + distanceFromXAxis
                if self.lbottomLimit > i:
                    # Stack on the left group.
                    del lbottoms[len(dataSet) :]
                    rect = ax1.bar(
                        ind,
                        dataSet,
                        width,
                        color=color,
                        label=self.graphs[0].barLegends[i],
                        bottom=lbottoms,
                    )
                    j = 0
                    for x in dataSet:
                        lbottoms[j] += x
                        j += 1
                else:
                    # Stack on the right group, shifted by one bar width.
                    del rbottoms[len(dataSet) :]
                    rect = ax1.bar(
                        ind + width,
                        dataSet,
                        width,
                        color=color,
                        label=self.graphs[0].barLegends[i],
                        bottom=rbottoms,
                    )
                    j = 0
                    for x in dataSet:
                        rbottoms[j] += x
                        j += 1
            else:
                # Grouped (side-by-side) bars: column i of every group.
                for data in self.graphs[0].barData:
                    dataSet.append(data[i])
                ind = np.arange(len(dataSet)) + distanceFromXAxis
                rect = ax1.bar(
                    ind + (width * i),
                    dataSet,
                    width,
                    label=self.graphs[0].barLegends[i],
                    color=color,
                )

            rects.append(rect)

            del dataSet[:]

        if True == stack:
            # Round the y limit up so stacked totals don't touch the frame.
            ymin, ymax = plt.ylim()
            if ymax > (math.ceil(ymax) - 0.5):
                ymax = math.ceil(ymax) + 1
            else:
                ymax = math.ceil(ymax)
            plt.ylim(ymin=ymin, ymax=ymax)

        if self.newYticks:
            self.addnewYaxis(fig, ax1)

        # Draw the horizontal reference line with its label box.
        if True == self.horizontalLine:
            plt.axhline(y=self.hLine, color="r")
            ax1.text(
                self.hLineBoxX,
                self.hLineBoxY,
                self.hLineName,
                bbox=dict(facecolor="red", alpha=0.5),
            )

        # TODO merge this into a single loop for plots and bar graphs
        if True == self.bar_plot_mix:
            color = color = next(barcolors)
            for graph in self.graphs:
                if graph.graphType == "bar":
                    continue
                # Line overlays take colours after the bar colours.
                colorOffset = numPlots
                if self.colorList:
                    color = self.colorList[colorOffset]
                    colorOffset += 1

                line = ax1.plot(
                    graph.xs, graph.ys, marker="x", color=color, label=graph.xLabel
                )

        if self.graphs[0].yLabel:
            ax1.set_ylabel(self.graphs[0].yLabel, fontsize=18)
        if self.graphs[0].title:
            ax1.set_title(self.graphs[0].title)

        if True == stack:
            # ax1.set_xticks(ind + (width * 1.5))
            # ax1.set_xticks(ind + width)
            ax1.set_xticks(ind + width * 0.5)
            # ax1.set_xticklabels(self.graphs[0].barNames[2:6])
            # ax1.set_xticklabels(self.graphs[0].barNames[1:5])
            # NOTE(review): hard-coded to the first four bar names.
            ax1.set_xticklabels(self.graphs[0].barNames[0:4])
            # ax1.legend(rects[:], self.graphs[0].barLegends, fontsize=12, loc='upper right')
            ax1.legend()
            if self.barTextBoxTxt:
                text = ""
                # fp = dict(size=10)
                x0 = 0.03
                y0 = 1.2
                for text in self.barTextBoxTxt:
                    at = ax1.annotate(
                        text, xy=(x0, y0), bbox=dict(boxstyle="round", fc="w")
                    )
                    ax1.add_artist(at)
                    x0 += 1

                # _at = AnchoredText(text, loc=2, prop=fp)

        else:
            ax1.set_xticks(ind + (width * (numPlots / 2)))
            ax1.set_xticklabels(self.graphs[0].barNames)
            # ax1.legend(rects[:], self.graphs[0].barLegends)
            ax1.legend()
        # plt.legend(bbox_to_anchor=(0.9, 0.9, 0.1, 0.1),
        #           bbox_transform=plt.gcf().transFigure,
        #           fontsize=12)

    # TODO complete this function
    def addDescBoxTxt(self, ax, ovDescs):
        """Unfinished: meant to render description boxes on `ax`;
        currently just hides the axis."""
        # plt.setp(ax.get_xticklabels, visible=False)
        # plt.setp(ax.get_yticklabels, visible=False)
        # x_axis = ax.get_xaxis()
        # y_axis = ax.get_yaxis()

        # x_axis.set_visible(False)
        # y_axis.set_visible(False)

        ax.set_visible(False)

        # at = ax1.annotate(text, xy=(x0, y0),
        #                  bbox=dict(boxstyle="round", fc="w"))
        # ax1.add_artist(at)
        # x0 += 1

    def fourBarGraphs(self, numGraphs, keys=[], Title=[]):
        """Draw up to four bar charts in a 2x2 subplot grid; unused
        subplot slots are hidden."""
        # get max value to be displayed
        if True == self.alignYmax:
            maxval = 0.3
            for graph in self.graphs:
                for x in graph.barData:
                    for y in x:
                        if y > maxval:
                            maxval = y + 0.1

        fig, aXarr = plt.subplots(2, 2)
        # fig.suptitle(Title, fontsize=24, y=0.96)
        fig.suptitle(Title, fontsize=24)
        fig.set_size_inches(15, 10)

        x_range = [0, 1]
        y_range = [0, 1]
        k = 0

        numSubPlotDisplay = self.numSubPlots + 1

        for i in x_range:
            for j in y_range:
                numSubPlotDisplay -= 1
                # Past the last populated subplot: hide the slot.
                if 0 >= numSubPlotDisplay:
                    if self.barDescriptionBoxTxt:
                        # addDescBoxTxt(aXarr[i][j], self.barDescriptionBoxTxt)
                        aXarr[i][j].set_visible(False)
                    else:
                        aXarr[i][j].set_visible(False)
                    continue
                if True == self.alignYmax:
                    aXarr[i][j].set_ylim(ymax=maxval, ymin=0)

                if keys:
                    self.oneBar(aXarr[i][j], self.graphs[k], keys[k])
                else:
                    self.oneBar(aXarr[i][j], self.graphs[k])
                if "" != self.graphs[k].barGraphXTitle:
                    aXarr[i][j].set_xlabel(self.graphs[k].barGraphXTitle)

                # Second y axis only on the left-hand column.
                if self.newYticks and (0 == j):
                    self.addnewYaxis(fig, aXarr[i][0])
                # graph reference
                k += 1

        plt.legend(bbox_to_anchor=(0.9, 1.1), loc="upper center")

        # plt.setp([a.get_xticklabels() for a in aXarr[0, :]], visible=False)
        # plt.setp([a.get_yticklabels() for a in aXarr[:, 1]], visible=False)

    def multiGraph(self, numGraphs, keys=[]):
        """Draw `numGraphs` stacked subplots, one graphit2d per row."""
        self.fig = plt.figure()
        self.hasLegend = True
        graphNum = 0
        color = next(colors)
        for i in range(0, numGraphs):
            marker = "x"
            lineStyle = "-"
            if self.lineStyleList:
                lineStyle = self.lineStyleList[i]
            if self.markerList:
                marker = self.markerList[i]
            if self.colorList:
                color = self.colorList[i]

            graphNum += 1
            key = []
            if keys is not None:
                key = keys[i]
            # Encode rows/cols/index as the 3-digit subplot spec, e.g. 311.
            subplotnum = int(str(numGraphs) + str(1) + str(i))
            self.onePlot(
                self.fig.add_subplot(subplotnum),
                self.graphs[i],
                key,
                color=color,
                lineStyle=lineStyle,
                Marker=marker,
            )

        if self.newXticks:
            # TODO - this is broken, needs to be moved into the above loop maybe..
            # NOTE(review): `ax` is undefined here — this branch raises
            # NameError if newXticks is set when multiGraph runs.
            newAx = self.fig.add_axes(ax.get_position())
            newAx.patch.set_visible(False)
            newAx.yaxis.set_visible(False)

            newAx.spines["bottom"].set_position(("outward", 50))
            # newAx.spines['bottom'].set_color('red')
            # newAx.spines['bottom'].set_facecolor('red')
            # newAx.spines['bottom'].set_edgecolor('red')

            newAx.set_xticks(list(range(len(self.newXticks))))
            newAx.set_xticklabels(self.newXticks)
            newAx.set_xlabel(self.newXLabel, color="b")
            newAx.xaxis.set_visible(True)

        if keys is not None:
            plt.legend(bbox_to_anchor=(0.9, 1.1), loc="upper center")

    def multiPlots(self, numPlots, keys=[], Title=[]):
        """Draw `numPlots` line plots on a single shared axes, with an
        optional offset secondary x axis and horizontal marker line."""
        self.fig = plt.figure()
        self.fig.set_size_inches(15, 10)
        newAx = []

        # 0 means "use the default" for font size and title offsets.
        titleFontSize = self.titleFontSize
        titleYOffset = self.titleYOffset
        titleXOffset = self.titleXOffset
        if 0 == self.titleFontSize:
            titleFontSize = 24
        if 0 == self.titleYOffset:
            titleYOffset = 0.96
        if 0 == self.titleXOffset:
            self.fig.suptitle(Title, fontsize=titleFontSize, y=titleYOffset)
        else:
            self.fig.suptitle(
                Title, fontsize=titleFontSize, y=titleYOffset, x=titleXOffset
            )

        self.hasLegend = True
        graphNum = 0
        if self.newXticks:
            # Secondary x axis pushed below the main one.
            newAx = self.fig.add_axes([0.05, 0.1, 0.72, 0.8])
            newAx.patch.set_visible(False)
            newAx.yaxis.set_visible(False)

            newAx.spines["bottom"].set_position(("outward", 35))
            newAx.spines["bottom"].set_color("r")
            newAx.spines["bottom"].set_facecolor("r")
            newAx.spines["bottom"].set_edgecolor("r")

            newAx.set_xticks(list(range(len(self.newXticks))))
            newAx.set_xticklabels(self.newXticks)
            newAx.set_xlabel(self.newXLabel, color="b")
            newAx.xaxis.set_visible(True)

        if newAx:
            ax = self.fig.add_axes(newAx.get_position())
        else:
            xLen = self.xLen
            yLen = self.yLen
            if 0 == self.xLen:
                xLen = 0.72
            if 0 == self.yLen:
                yLen = 0.8

            ax = self.fig.add_axes([0.05, 0.1, xLen, yLen])

        for i in range(0, numPlots):
            marker = "x"
            lineStyle = "-"
            color = next(colors)
            if self.lineStyleList:
                lineStyle = self.lineStyleList[i]
            if self.markerList:
                marker = self.markerList[i]
            if self.colorList:
                color = self.colorList[i]

            graphNum += 1
            if keys is not None:
                self.onePlot(
                    ax,
                    self.graphs[i],
                    keys[i],
                    color=color,
                    lineStyle=lineStyle,
                    Marker=marker,
                )
            else:
                self.onePlot(
                    ax, self.graphs[i], color=color, lineStyle=lineStyle, Marker=marker
                )

        if True == self.horizontalLine:
            plt.axhline(y=self.hLine, color="r")
            ax.text(
                self.hLineBoxX,
                self.hLineBoxY,
                self.hLineName,
                bbox=dict(facecolor="red", alpha=0.5),
            )

        # plt.legend(bbox_to_anchor=(0.9, 1.1), loc = 'upper center')
        if keys is not None:
            plt.legend(
                bbox_to_anchor=(0.9, 0.9, 0.1, 0.1),
                bbox_transform=plt.gcf().transFigure,
                fontsize=12,
            )

    def generatePlot(
        self, plotName="output.jpg", keys=None, title=[], firstYvalue=0, firstXvalue=0
    ):
        """Render the stored line plots and save them to `plotName`.

        Uses multiPlots() when numPlots > 1, otherwise one subplot per
        graph via multiGraph().  A positive `firstYvalue` forces the y
        range to start at 0 with headroom above the maximum.  Refuses
        to render more than four subplots.
        """

        if self.numSubPlots > 4:
            print("Max subplots exceeded: " + str(self.numSubPlots))
            return

        # generate graphs, write to file
        if self.numPlots > 1:
            self.multiPlots(self.numPlots, keys, title)
        else:
            self.multiGraph(self.numSubPlots, keys)

        # write to file
        if 0 < firstYvalue:
            ymin, ymax = plt.ylim()
            if ymax == math.ceil(ymax):
                ymax += 2
            else:
                ymax = math.ceil(ymax) + 2
            # ymin = ymax - 2
            ymin = 0
            plt.ylim(ymin=ymin, ymax=ymax)

        xmin, xmax = plt.xlim()
        xmin = 0
        plt.xlim(xmin=xmin, xmax=xmax)
        plt.savefig(plotName)

    def generateBar(self, plotName="output.jpg", keys=[], title=[]):
        """Render the stored bar charts and save them to `plotName`.

        setBarOverlay selects stacked rendering; otherwise charts go to
        a 2x2 grid (fourBarGraphs) when there is more than one subplot,
        or a single grouped chart (multiBarPlots).
        """

        if True == self.setBarOverlay:
            self.multiBarPlots(self.numPlots, keys, title, stack=True)
        else:
            if self.numSubPlots > 1:
                self.fourBarGraphs(self.numSubPlots, keys, title)
            else:
                self.multiBarPlots(self.numPlots, keys, title)

        plt.savefig(plotName)
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 02/23] dts: merge DTS framework/plotting.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 01/23] dts: merge DTS framework/plotgraph.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 03/23] dts: merge DTS framework/pmd_output.py " Juraj Linkeš
` (20 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/plotting.py | 246 ++++++++++++++++++++++++++++++++++++++
1 file changed, 246 insertions(+)
create mode 100644 dts/framework/plotting.py
diff --git a/dts/framework/plotting.py b/dts/framework/plotting.py
new file mode 100644
index 0000000000..83499061ef
--- /dev/null
+++ b/dts/framework/plotting.py
@@ -0,0 +1,246 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import shutil
+
+from docutils.parsers.rst.directives import path
+
+import framework.plotgraph as plotgraph
+import framework.utils as utils
+
+from .exception import VerifyFailure
+from .plotgraph import Plot2DGraph
+from .rst import path2Result
+
+"""
+Generate Plots for performance test results
+"""
+
+
class tableData(object):
    """Container for tabular results.

    `headers` holds the column names; `data` holds, in the same order,
    one list (column of values) per header.
    """

    def __init__(self):
        self.headers = []  # column names
        self.data = []     # one value list per header, parallel to headers
+
+
class Plotting(object):
    """Generate bar and line plot images for performance test results.

    Wraps Plot2DGraph and writes image files (PNG by default) under
    ``<path2Result>/<crb>/<target>/<nic>/images/``.
    """

    path_2_result = path2Result
    plots_subfolder = "images"
    image_format = "png"

    default_bar_colours = [
        "#f70202",
        "#0f0b0b",
        "#123eed",
        "#07601b",
        "#36f760",
        "#87210d",
        "#512f28",
        "#11c6b1",
        "#45f94e",
        "#f94566",
    ]

    default_line_markers = ["o"]

    default_line_styles = ["--"]

    def __init__(self, crb, target, nic):
        """Create (if needed) the plots output folder for the given
        crb/target/nic combination.

        Raises:
            VerifyFailure: if the folder cannot be created.
        """
        try:
            path = "/".join(
                [Plotting.path_2_result, crb, target, nic, Plotting.plots_subfolder]
            )

            if not os.path.exists(path):
                os.makedirs(path)

            self.plots_path = path

        except Exception as e:
            raise VerifyFailure("Plot Error: " + str(e))

    def clear_all_plots(self, crb, target):
        """Remove the plots folder and everything in it (errors ignored)."""
        shutil.rmtree(self.plots_path, True)

    def create_bars_plot(
        self,
        image_filename,
        plot_title,
        xdata,
        ydata,
        xlabel="",
        ylabel="",
        legend=[],
        bar_colours=default_bar_colours,
    ):
        """Render a grouped bar chart and return the image path.

        `ydata` holds one array per data series; each array must have
        one value per entry of `xdata`.  Returns "" (after printing an
        error) on a length mismatch.
        """
        for yseries in ydata:
            if len(xdata) != len(yseries):
                print(
                    utils.RED(
                        "The number of items in X axis (%s) and Y axis (%s) does not match."
                        % (xdata, ydata)
                    )
                )
                return ""

        image_path = "%s/%s.%s" % (
            self.plots_path,
            image_filename,
            Plotting.image_format,
        )

        pgraph = Plot2DGraph()
        pgraph.resetMe()

        # Set the number of bars, ydata contains a array per set of data
        pgraph.setNumPlots(len(ydata))
        pgraph.setNumSubplots(1)

        pgraph.setColors(bar_colours)
        pgraph.addBarYlabel(0, ylabel)
        pgraph.setBarLegends(0, legend)

        # For each value in the x axis add corresponding bar (array in ydata)
        for xvalue in range(len(xdata)):
            yvalues = [_[xvalue] for _ in ydata]
            pgraph.addBarData(0, xdata[xvalue], yvalues)

        # Dynamic adjustment of the bar widths for better plot appearance
        bar_width = 0.30 - 0.005 * ((len(xdata) * len(legend)) - 4)
        pgraph.setBarWidth(0, bar_width)

        pgraph.generateBar(plotName=image_path, title=plot_title)

        return image_path

    def create_lines_plot(
        self,
        image_filename,
        plot_title,
        xdata,
        ydata,
        xticks=[],
        yticks=[],
        xlabel="",
        ylabel="",
        legend=[],
        line_colours=default_bar_colours,
        line_markers=default_line_markers,
        line_styles=default_line_styles,
        addHline=False,
        hLine={},
        testing=False,
    ):
        """Render one or more line plots and return the image path.

        `ydata` is a flat list holding len(xticks) values per plot;
        consecutive slices of length len(xticks) become separate plots.
        `hLine` (when addHline is True) supplies the horizontal
        reference line: keys "name", "value", "boxXvalue", "boxYvalue".
        """
        image_path = "%s/%s.%s" % (
            self.plots_path,
            image_filename,
            Plotting.image_format,
        )

        pgraph = Plot2DGraph()
        pgraph.resetMe()

        # BUGFIX: use integer division — true division yields a float in
        # Python 3, which breaks range(numPlots) and list * numPlots below.
        numPlots = len(ydata) // len(xticks)
        numticks = len(xticks)

        # Set the number of bars, ydata contains a array per set of data
        pgraph.setNumPlots(numPlots)
        # TODO more than one plot per figure needs to be tested
        pgraph.setNumSubplots(1)

        # Workaround: synthesize extra colours when there are more plots
        # than configured colours.
        if numPlots > len(line_colours):
            print("WARNING - numPlots > len(line_colours)")
            # BUGFIX: copy first — the default argument aliases the
            # class-level default_bar_colours list, and appending to it
            # would permanently grow the shared default for later calls.
            line_colours = list(line_colours)
            r = 0x00
            g = 0x66
            b = 0xFF
            for _ in range(numPlots - len(line_colours)):
                r = r % 256
                g = g % 256
                b = b % 256
                _ = "#%0.2x%0.2x%0.2x" % (r, g, b)
                line_colours.append(_)
                r += 7
                g -= 10
                b -= 9

        # Repeat markers/styles so every plot has one (rebinds locally,
        # does not mutate the shared defaults).
        line_markers = line_markers * numPlots
        line_styles = line_styles * numPlots

        pgraph.setColors(line_colours)
        pgraph.setMarkers(line_markers)
        pgraph.setLineStyle(line_styles)

        pgraph.addBarYlabel(0, ylabel)
        pgraph.setBarLegends(0, legend)

        # For each plot, add its slice of ydata against the shared xdata.
        for i in list(range(numPlots)):
            yDataStart = i * numticks
            pgraph.addPlotData(
                i,
                "Number of active pipes per output port",
                ylabel,
                xticks,
                [],
                xdata,
                ydata[yDataStart : (yDataStart + numticks)],
                [],
                [],
            )

        # Layout tuning for the generated figure.
        pgraph.xLen = 0.6
        pgraph.titleFontSize = 18
        pgraph.titleYOffset = 0.96
        pgraph.titleXOffset = 0.35

        if addHline:
            pgraph.horizontalLine = True
            pgraph.hLineName = hLine["name"]
            pgraph.hLine = hLine["value"]
            pgraph.hLineBoxX = hLine["boxXvalue"]
            pgraph.hLineBoxY = hLine["boxYvalue"]

        pgraph.generatePlot(
            plotName=image_path, keys=legend, title=plot_title, firstYvalue=1
        )

        return image_path
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 03/23] dts: merge DTS framework/pmd_output.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 01/23] dts: merge DTS framework/plotgraph.py " Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 02/23] dts: merge DTS framework/plotting.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 04/23] dts: merge DTS framework/qemu_kvm.py " Juraj Linkeš
` (19 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/pmd_output.py | 341 ++++++++++++++++++++++++++++++++++++
1 file changed, 341 insertions(+)
create mode 100644 dts/framework/pmd_output.py
diff --git a/dts/framework/pmd_output.py b/dts/framework/pmd_output.py
new file mode 100644
index 0000000000..f27c2513af
--- /dev/null
+++ b/dts/framework/pmd_output.py
@@ -0,0 +1,341 @@
+# BSD LICENSE
+#
+# Copyright(c) 2020 Intel Corporation. All rights reserved
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+from time import sleep
+
+from .settings import PROTOCOL_PACKET_SIZE, TIMEOUT, get_nic_driver
+from .utils import create_mask
+
+
class PmdOutput:

    """
    Drive an interactive testpmd session on the DUT: build the testpmd
    command line, launch it, and parse per-port statistics and
    'show port info' details out of the command output.
    """

    def __init__(self, dut, session=None):
        # dut: DUT object owning this testpmd instance.
        # session: optional alternative session used to drive testpmd;
        # defaults to the DUT itself.
        self.dut = dut
        if session is None:
            session = dut
        self.session = session
        # register this helper on the DUT for easy access elsewhere
        self.dut.testpmd = self
        # field labels used to locate counters in 'show port stats' output
        self.rx_pkts_prefix = "RX-packets:"
        self.rx_missed_prefix = "RX-missed:"
        self.rx_bytes_prefix = "RX-bytes:"
        self.rx_badcrc_prefix = "RX-badcrc:"
        self.rx_badlen_prefix = "RX-badlen:"
        self.rx_error_prefix = "RX-errors:"
        self.rx_nombuf_prefix = "RX-nombuf:"
        self.tx_pkts_prefix = "TX-packets:"
        self.tx_error_prefix = "TX-errors:"
        self.tx_bytes_prefix = "TX-bytes:"
        self.bad_ipcsum_prefix = "Bad-ipcsum:"
        self.bad_l4csum_prefix = "Bad-l4csum:"
        self.set_default_corelist()

    def get_pmd_value(self, prefix, out):
        """Return the integer that follows *prefix* in *out*, or None."""
        pattern = re.compile(prefix + "(\s+)([0-9]+)")
        m = pattern.search(out)
        if m is None:
            return None
        else:
            return int(m.group(2))

    def set_default_corelist(self):
        """
        Set the default core spec (1 socket / 2 cores / 1 thread) used to
        start testpmd.

        Raises ValueError when the DUT exposes fewer than two cores.
        """
        core_number = len(self.dut.cores)
        if core_number < 2:
            raise ValueError(f"Not enough cores on DUT {self.dut}")
        else:
            self.default_cores = "1S/2C/1T"

    def get_pmd_stats(self, portid):
        """
        Run 'show port stats <portid>' and return a dict of all counters.

        Values are ints, or None for counters absent from the output
        (e.g. Bad-ipcsum/Bad-l4csum only appear with the csum forward
        engine).
        """
        stats = {}
        out = self.session.send_expect("show port stats %d" % portid, "testpmd> ")
        stats["RX-packets"] = self.get_pmd_value(self.rx_pkts_prefix, out)
        stats["RX-missed"] = self.get_pmd_value(self.rx_missed_prefix, out)
        stats["RX-bytes"] = self.get_pmd_value(self.rx_bytes_prefix, out)

        stats["RX-badcrc"] = self.get_pmd_value(self.rx_badcrc_prefix, out)
        stats["RX-badlen"] = self.get_pmd_value(self.rx_badlen_prefix, out)
        stats["RX-errors"] = self.get_pmd_value(self.rx_error_prefix, out)
        stats["RX-nombuf"] = self.get_pmd_value(self.rx_nombuf_prefix, out)
        stats["TX-packets"] = self.get_pmd_value(self.tx_pkts_prefix, out)
        stats["TX-errors"] = self.get_pmd_value(self.tx_error_prefix, out)
        stats["TX-bytes"] = self.get_pmd_value(self.tx_bytes_prefix, out)

        # displayed when testpmd forward engine is configured to csum
        stats["Bad-ipcsum"] = self.get_pmd_value(self.bad_ipcsum_prefix, out)
        stats["Bad-l4csum"] = self.get_pmd_value(self.bad_l4csum_prefix, out)
        return stats

    def get_pmd_cmd(self):
        """Return the last testpmd command line launched by start_testpmd()."""
        return self.command

    def start_testpmd(
        self,
        cores="default",
        param="",
        eal_param="",
        socket=0,
        fixed_prefix=False,
        expected="testpmd> ",
        timeout=120,
        **config,
    ):
        """
        start testpmd with input parameters.
        :param cores: eg:
                      cores='default'
                      cores='1S/4C/1T'
        :param param: dpdk application (testpmd) parameters
        :param eal_param: user defined DPDK eal parameters, eg:
                      eal_param='-a af:00.0 -a af:00.1,proto_xtr=vlan',
                      eal_param='-b af:00.0 --file-prefix=vf0',
                      eal_param='--no-pci',
        :param socket: physical CPU socket index
        :param fixed_prefix: use fixed file-prefix or not, when it is true,
                      the file-prefix will not be added a timestamp
        :param expected: prompt to wait for after launching testpmd
        :param timeout: seconds to wait for the prompt
        :param config: kwargs user defined eal parameters, eg:
                      set PCI allow list: ports=[0,1], port_options={0: "proto_xtr=vlan"},
                      set PCI block list: b_ports=['0000:1a:00.0'],
                      disable PCI: no_pci=True,
                      add virtual device: vdevs=['net_vhost0,iface=vhost-net,queues=1']
        :return: output of launching testpmd
        """
        # pad so the substring matches below always see surrounding spaces;
        # translate deprecated '-w' (whitelist) into '-a' (allow)
        eal_param = " " + eal_param + " "
        eal_param = eal_param.replace(" -w ", " -a ")
        # pull any user-supplied --file-prefix out of eal_param and pass it
        # through config instead, so create_eal_parameters() manages it
        re_file_prefix = "--file-prefix[\s*=]\S+\s"
        file_prefix_str = re.findall(re_file_prefix, eal_param)
        if file_prefix_str:
            tmp = re.split("(=|\s+)", file_prefix_str[-1].strip())
            file_prefix = tmp[-1].strip()
            config["prefix"] = file_prefix
        eal_param = re.sub(re_file_prefix, "", eal_param)
        config["other_eal_param"] = eal_param

        config["cores"] = cores
        # when the caller restricted the PCI devices in no way, default to
        # allow-listing every port known on the DUT
        if (
            " -w " not in eal_param
            and " -a " not in eal_param
            and " -b " not in eal_param
            and "ports" not in config
            and "b_ports" not in config
            and " --no-pci " not in eal_param
            and (
                "no_pci" not in config
                or ("no_pci" in config and config["no_pci"] != True)
            )
        ):
            config["ports"] = [
                self.dut.ports_info[i]["pci"] for i in range(len(self.dut.ports_info))
            ]
        all_eal_param = self.dut.create_eal_parameters(
            fixed_prefix=fixed_prefix, socket=socket, **config
        )

        app_name = self.dut.apps_name["test-pmd"]
        command = app_name + " %s -- -i %s" % (all_eal_param, param)
        # NOTE(review): as written this replace() is a no-op (single space ->
        # single space); upstream intent looks like collapsing double spaces —
        # verify against the original source
        command = command.replace(" ", " ")
        if self.session != self.dut:
            self.session.send_expect("cd %s" % self.dut.base_dir, "# ")
        out = self.session.send_expect(command, expected, timeout)
        self.command = command
        # wait 10s to ensure links getting up before test start.
        sleep(10)
        return out

    def execute_cmd(
        self, pmd_cmd, expected="testpmd> ", timeout=TIMEOUT, alt_session=False
    ):
        """Send one testpmd command and return its output."""
        # crude type check: only DUT-backed sessions accept alt_session
        if "dut" in str(self.session):
            return self.session.send_expect(
                "%s" % pmd_cmd, expected, timeout=timeout, alt_session=alt_session
            )
        else:
            return self.session.send_expect("%s" % pmd_cmd, expected, timeout=timeout)

    def get_output(self, timeout=1):
        """Return pending session output accumulated since the last command."""
        # DUT sessions and plain sessions expose different accessor names
        if "dut" in str(self.session):
            return self.session.get_session_output(timeout=timeout)
        else:
            return self.session.get_session_before(timeout=timeout)

    def get_value_from_string(self, key_str, regx_str, string):
        """
        Get the first value following *key_str* that matches *regx_str*
        in *string*; returns a single space when nothing matches.
        """
        pattern = r"(?<=%s)%s" % (key_str, regx_str)
        s = re.compile(pattern)
        res = s.search(string)
        if type(res).__name__ == "NoneType":
            return " "
        else:
            return res.group(0)

    def get_all_value_from_string(self, key_str, regx_str, string):
        """
        Get all values following *key_str* that match *regx_str* in
        *string*.

        NOTE(review): re.findall never returns None, so the " " branch is
        dead code; an empty list is returned when nothing matches.
        """
        pattern = r"(?<=%s)%s" % (key_str, regx_str)
        s = re.compile(pattern)
        res = s.findall(string)
        if type(res).__name__ == "NoneType":
            return " "
        else:
            return res

    def get_detail_from_port_info(self, key_str, regx_str, port):
        """
        Get the detail info from the output of pmd cmd 'show port info <port num>'.
        """
        out = self.session.send_expect("show port info %d" % port, "testpmd> ")
        find_value = self.get_value_from_string(key_str, regx_str, out)
        return find_value

    def get_port_mac(self, port_id):
        """
        Get the specified port MAC.
        """
        return self.get_detail_from_port_info(
            "MAC address: ", "([0-9A-F]{2}:){5}[0-9A-F]{2}", port_id
        )

    def get_firmware_version(self, port_id):
        """
        Get the firmware version.
        """
        return self.get_detail_from_port_info("Firmware-version: ", "\S.*", port_id)

    def get_port_connect_socket(self, port_id):
        """
        Get the socket id which the specified port is connecting with.
        """
        return self.get_detail_from_port_info("Connect to socket: ", "\d+", port_id)

    def get_port_memory_socket(self, port_id):
        """
        Get the socket id which the specified port memory is allocated on.
        """
        return self.get_detail_from_port_info(
            "memory allocation on the socket: ", "\d+", port_id
        )

    def get_port_link_status(self, port_id):
        """
        Get the specified port link status now.
        """
        return self.get_detail_from_port_info("Link status: ", "\S+", port_id)

    def get_port_link_speed(self, port_id):
        """
        Get the specified port link speed now.
        """
        return self.get_detail_from_port_info("Link speed: ", "\d+", port_id)

    def get_port_link_duplex(self, port_id):
        """
        Get the specified port link mode, duplex or simplex.
        """
        return self.get_detail_from_port_info("Link duplex: ", "\S+", port_id)

    def get_port_promiscuous_mode(self, port_id):
        """
        Get the promiscuous mode of port.
        """
        return self.get_detail_from_port_info("Promiscuous mode: ", "\S+", port_id)

    def get_port_allmulticast_mode(self, port_id):
        """
        Get the allmulticast mode of port.
        """
        return self.get_detail_from_port_info("Allmulticast mode: ", "\S+", port_id)

    def check_tx_bytes(self, tx_bytes, exp_bytes=0):
        """
        fortville nic will send lldp packet when nic setup with testpmd.
        so should used (tx_bytes - exp_bytes) % PROTOCOL_PACKET_SIZE['lldp']
        for check tx_bytes count right
        """
        # error_flag stays truthy only while the difference is NOT an exact
        # multiple of any known lldp packet size; returns True when the
        # byte count is explainable by whole lldp packets
        error_flag = 1
        for size in PROTOCOL_PACKET_SIZE["lldp"]:
            error_flag = error_flag and (tx_bytes - exp_bytes) % size

        return not error_flag

    def get_port_vlan_offload(self, port_id):
        """
        Function: get the port vlan setting info.
        return value:
            'strip':'on'
            'filter':'on'
            'qinq':'off'
        """
        vlan_info = {}
        vlan_info["strip"] = self.get_detail_from_port_info("strip ", "\S+", port_id)
        vlan_info["filter"] = self.get_detail_from_port_info("filter", "\S+", port_id)
        vlan_info["qinq"] = self.get_detail_from_port_info(
            "qinq\(extend\) ", "\S+", port_id
        )
        return vlan_info

    def quit(self):
        """Exit testpmd and wait for the shell prompt."""
        self.session.send_expect("quit", "# ")

    def wait_link_status_up(self, port_id, timeout=10):
        """
        check the link status is up
        if not, loop wait

        NOTE(review): with timeout <= 0 the loop never runs and the final
        'status' reference raises NameError — callers must pass timeout >= 1.
        """
        for i in range(timeout):
            out = self.session.send_expect(
                "show port info %s" % str(port_id), "testpmd> "
            )
            status = self.get_all_value_from_string("Link status: ", "\S+", out)
            if "down" not in status:
                break
            sleep(1)
        return "down" not in status

    def get_max_rule_number(self, obj, out):
        """
        Parse the flow-director guaranteed + best-effort filter counts out
        of *out* and return their sum (the card's max rule number).

        obj: test-suite object providing verify() and a logger.
        """
        res = re.search(
            r"fd_fltr_guar\s+=\s+(\d+).*fd_fltr_best_effort\s+=\s+(\d+)\.", out
        )
        obj.verify(res, "'fd_fltr_guar' and 'fd_fltr_best_effort not found'")
        fltr_guar, fltr_best = res.group(1), res.group(2)
        max_rule = int(fltr_guar) + int(fltr_best)
        obj.logger.info(f"this Card max rule number is :{max_rule}")
        return max_rule
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 04/23] dts: merge DTS framework/qemu_kvm.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (2 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 03/23] dts: merge DTS framework/pmd_output.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 05/23] dts: merge DTS framework/qemu_libvirt.py " Juraj Linkeš
` (18 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/qemu_kvm.py | 2042 +++++++++++++++++++++++++++++++++++++
1 file changed, 2042 insertions(+)
create mode 100644 dts/framework/qemu_kvm.py
diff --git a/dts/framework/qemu_kvm.py b/dts/framework/qemu_kvm.py
new file mode 100644
index 0000000000..5615263bd8
--- /dev/null
+++ b/dts/framework/qemu_kvm.py
@@ -0,0 +1,2042 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import os
+import re
+import time
+
+from .exception import StartVMFailedException
+from .settings import DTS_PARALLEL_SETTING, get_host_ip, load_global_setting
+from .utils import RED, parallel_lock
+from .virt_base import ST_NOTSTART, ST_PAUSE, ST_RUNNING, ST_UNKNOWN, VirtBase
+
+# This name is directly defined in the qemu guest service
+# So you can not change it except it is changed by the service
+QGA_DEV_NAME = "org.qemu.guest_agent.0"
+
+
def handle_control_session(func):
    """
    Decorator for VM control-channel commands: opens the appropriate
    channel (serial socket, telnet or qemu guest agent, selected by
    self.control_type) before running *func*, closes it afterwards, and
    returns the string "Failed" on any connection or command error.
    """

    def _handle_control_session(self, command):
        # Establish the control channel first; bail out with "Failed" if
        # the connection cannot be made, so func is never called on a
        # dead channel.
        try:
            if self.control_type == "socket":
                assert self.connect_serial_port(
                    name=self.vm_name
                ), "Can't connect to serial socket"
            elif self.control_type == "telnet":
                assert self.connect_telnet_port(
                    name=self.vm_name
                ), "Can't connect to serial port"
            else:
                assert self.connect_qga_port(
                    name=self.vm_name
                ), "Can't connect to qga port"
        # fix: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit and made the framework unkillable here
        except Exception:
            return "Failed"

        try:
            out = func(self, command)
            self.quit_control_session()
            return out
        except Exception as e:
            print(
                RED(
                    "Exception happened on [%s] serial with cmd [%s]"
                    % (self.vm_name, command)
                )
            )
            print(RED(e))
            # tear the broken session down so the next command can reconnect
            self.close_control_session(dut_id=self.host_dut.dut_id)
            return "Failed"

    return _handle_control_session
+
+
+class QEMUKvm(VirtBase):
+
+ DEFAULT_BRIDGE = "br0"
+ QEMU_IFUP = (
+ "#!/bin/sh\n\n"
+ + "set -x\n\n"
+ + "switch=%(switch)s\n\n"
+ + "if [ -n '$1' ];then\n"
+ + " tunctl -t $1\n"
+ + " ip link set $1 up\n"
+ + " sleep 0.5s\n"
+ + " brctl addif $switch $1\n"
+ + " exit 0\n"
+ + "else\n"
+ + " echo 'Error: no interface specified'\n"
+ + " exit 1\n"
+ + "fi"
+ )
+
+ QEMU_IFUP_PATH = "/etc/qemu-ifup"
+ # Default login session timeout value
+ LOGIN_TIMEOUT = 60
+ # By default will wait 120 seconds for VM start
+ # If VM not ready in this period, will try restart it once
+ START_TIMEOUT = 120
+ # Default timeout value for operation when VM starting
+ OPERATION_TIMEOUT = 20
+ # Default login prompt
+ LOGIN_PROMPT = "login:"
+ # Default password prompt
+ PASSWORD_PROMPT = "Password:"
+
def __init__(self, dut, vm_name, suite_name):
    """
    dut: host DUT object the VM runs on
    vm_name: name of this VM (also used for pid file / serial sockets)
    suite_name: test suite the VM belongs to
    """
    super(QEMUKvm, self).__init__(dut, vm_name, suite_name)

    # initialize qemu emulator, example: qemu-system-x86_64
    self.qemu_emulator = self.get_qemu_emulator()

    # initialize qemu boot command line
    # example: qemu-system-x86_64 -name vm1 -m 2048 -vnc :1 -daemonize
    self.qemu_boot_line = ""

    # initialize some resource used by guest.
    self.init_vm_request_resource()

    # character and network device default index
    self.char_idx = 0
    self.netdev_idx = 0
    self.pt_idx = 0
    self.cuse_id = 0
    # devices pass-through into vm
    self.pt_devices = []
    self.pci_maps = []

    # default login user,password taken from the host DUT credentials
    self.username = dut.crb["user"]
    self.password = dut.crb["pass"]

    # internal variable to track whether default nic has been added
    self.__default_nic = False

    # arch info for multi-platform init (e.g. aarch64 needs -machine virt)
    self.arch = self.host_session.send_expect("uname -m", "# ")

    # set some default values for vm,
    # if there is not the values of the specified options
    self.set_vm_default()

    self.am_attached = False

    # allow restart VM when can't login
    self.restarted = False
+
def check_alive(self):
    """Return True when a qemu process still holds this VM's pid file."""
    out = self.host_session.send_expect(
        "lsof -Fp /tmp/.%s.pid" % self.vm_name, "#", timeout=30
    )
    # 'lsof -F' emits one field per line; pid lines look like "p<pid>"
    for candidate in out.splitlines():
        match = re.match(r"p(\d+)", candidate)
        if match is None:
            continue
        self.host_logger.info("Found VM %s already running..." % match.group(0))
        return True
    return False
+
def kill_alive(self):
    """Force-kill every qemu process recorded in this VM's pid file."""
    out = self.host_session.send_expect(
        "lsof -Fp /tmp/.%s.pid" % self.vm_name, "# "
    )
    # pid lines from 'lsof -F' look like "p<pid>"
    pids = [
        match.group(1)
        for match in (re.match(r"p(\d+)", line) for line in out.splitlines())
        if match
    ]
    for pid in pids:
        self.host_session.send_expect("kill -9 %s" % pid, "# ")
+
def set_vm_default(self):
    """Apply the baseline qemu options that every VM gets."""
    self.set_vm_name(self.vm_name)
    # aarch64 qemu requires the generic "virt" machine model
    if self.arch == "aarch64":
        self.set_vm_machine("virt")
    self.set_vm_enable_kvm()
    self.set_vm_pid_file()
    self.set_vm_daemon()
    self.set_vm_monitor()

    self.nic_num = 1
    if self.__default_nic:
        return
    # add the default control interface: one emulated NIC backed by a
    # user-mode netdev (done only once per VM)
    self.set_vm_net(type="nic")
    self.set_vm_net(type="user")
    self.__default_nic = True
+
def init_vm_request_resource(self):
    """
    Reset the per-VM host resources (pinned vcpus, assigned PCI devices).

    vcpus_pinned_to_vm is a space-separated host cpu list, e.g. '1 2 3 4';
    when non-empty, qemu is launched under 'taskset -c 1,2,3,4 ...'.
    """
    self.vcpus_pinned_to_vm = ""
    # PCI devices currently passed through to this VM
    self.assigned_pcis = []
+
def get_virt_type(self):
    """Name of the virtualization backend handled by this class."""
    return "KVM"
+
def get_qemu_emulator(self):
    """Pick the qemu binary matching the host architecture (uname -m)."""
    host_arch = self.host_session.send_expect("uname -m", "# ")
    return "qemu-system-%s" % host_arch
+
def set_qemu_emulator(self, qemu_emulator_path):
    """
    Use the emulator at the given absolute path; logs an error and
    returns None when the file is missing or not executable.
    """
    listing = self.host_session.send_expect("ls %s" % qemu_emulator_path, "# ")
    if "No such file or directory" in listing:
        self.host_logger.error(
            "No emulator [ %s ] on the DUT [ %s ]"
            % (qemu_emulator_path, self.host_dut.get_ip_address())
        )
        return None
    exec_check = self.host_session.send_expect(
        "[ -x %s ];echo $?" % qemu_emulator_path, "# "
    )
    if exec_check != "0":
        self.host_logger.error(
            "Emulator [ %s ] not executable on the DUT [ %s ]"
            % (qemu_emulator_path, self.host_dut.get_ip_address())
        )
        return None
    self.qemu_emulator = qemu_emulator_path
+
def add_vm_qemu(self, **options):
    """path: absolute path of the qemu emulator binary."""
    if "path" in options:
        self.set_qemu_emulator(options["path"])
+
def has_virtual_ability(self):
    """
    Check that the host CPU exposes VT-x ('vmx' flag) and that the kvm
    kernel modules are loaded; returns True only when both hold.
    """
    flags = self.host_session.send_expect("cat /proc/cpuinfo | grep flags", "# ")
    if re.search(" vmx ", flags) is None:
        self.host_logger.warning("Hardware virtualization disabled on host!!!")
        return False

    modules = self.host_session.send_expect("lsmod | grep kvm", "# ")
    if "kvm" in modules and "kvm_intel" in modules:
        return True
    self.host_logger.warning("kvm or kvm_intel not insmod!!!")
    return False
+
def enable_virtual_ability(self):
    """Load the kvm kernel modules on the host; always returns True."""
    for module in ("kvm", "kvm_intel"):
        self.host_session.send_expect("modprobe %s" % module, "# ")
    return True
+
def disk_image_is_ok(self, image):
    """
    Check if the image is OK and no error.

    NOTE: not implemented yet; always returns None.
    """
    pass
+
def image_is_used(self, image_path):
    """Return True when a running qemu on the host already uses the image."""
    qemu_cmd_lines = self.host_session.send_expect(
        "ps aux | grep qemu | grep -v grep", "# "
    )
    # match either the full path or just "/<basename> " on a command line
    image_name_flag = "/" + image_path.strip().split("/")[-1] + " "
    return image_path in qemu_cmd_lines or image_name_flag in qemu_cmd_lines
+
def __add_boot_line(self, option_boot_line):
    """Append one qemu option to the accumulated boot command line."""
    self.qemu_boot_line += " " + option_boot_line
+
def set_vm_enable_kvm(self, enable="yes"):
    """Record the 'enable-kvm' option in the VM parameter list."""
    entry = {"enable_kvm": [{"enable": "%s" % enable}]}
    index = self.find_option_index("enable_kvm")
    if index:
        self.params[index] = entry
    else:
        self.params.append(entry)
+
def add_vm_enable_kvm(self, **options):
    """'enable': 'yes' -> appends '-enable-kvm' to the boot line."""
    if options.get("enable") == "yes":
        self.__add_boot_line("-enable-kvm")
+
def set_vm_machine(self, machine):
    """Record the qemu '-machine' type in the VM parameter list."""
    entry = {"machine": [{"machine": "%s" % machine}]}
    index = self.find_option_index("machine")
    if index:
        self.params[index] = entry
    else:
        self.params.append(entry)
+
def add_vm_machine(self, **options):
    """
    'machine': 'virt'
    'opt_gic_version': appended as gic_version=<v> when machine is set
    """
    machine_line = "-machine"
    if options.get("machine"):
        machine_line += " %s" % options["machine"]
        if options.get("opt_gic_version"):
            machine_line += ",gic_version=%s" % options["opt_gic_version"]

    self.__add_boot_line(machine_line)
+
def set_vm_pid_file(self):
    """Register a pidfile (/tmp/.<vm_name>.pid) so qemu can be managed."""
    self.__pid_file = "/tmp/.%s.pid" % self.vm_name
    entry = {"pid_file": [{"name": "%s" % self.__pid_file}]}
    index = self.find_option_index("pid_file")
    if index:
        self.params[index] = entry
    else:
        self.params.append(entry)
+
def add_vm_pid_file(self, **options):
    """'name' : '/tmp/.qemu_vm0.pid'"""
    if "name" in options:
        self.__add_boot_line("-pidfile %s" % options["name"])
+
def set_vm_name(self, vm_name):
    """Record the VM name in the parameter list."""
    entry = {"name": [{"name": "%s" % vm_name}]}
    index = self.find_option_index("name")
    if index:
        self.params[index] = entry
    else:
        self.params.append(entry)
+
def add_vm_name(self, **options):
    """name: vm1"""
    if options.get("name"):
        self.__add_boot_line("-name %s" % options["name"])
+
def add_vm_cpu(self, **options):
    """
    model: [host | core2duo | ...]; see 'qemu-system-x86_64 -cpu help'
    number: '4'  # number of vcpus
    cpupin: '3 4 5 6'  # host cpu list to pin the vcpus to (via taskset)
    """
    if options.get("model"):
        self.__add_boot_line("-cpu %s" % options["model"])
    if options.get("number"):
        self.__add_boot_line("-smp %d" % int(options["number"]))
    if options.get("cpupin"):
        self.vcpus_pinned_to_vm = str(options["cpupin"])
+
def add_vm_mem(self, **options):
    """
    size: 1024  (guest RAM in MB)
    hugepage: 'yes' backs guest RAM with host hugepages (shared file)
    """
    if "size" in options:
        self.__add_boot_line("-m %s" % options["size"])
    if options.get("hugepage") == "yes":
        self.__add_boot_line(
            "-object memory-backend-file,"
            + "id=mem,size=%sM,mem-path=%s,share=on"
            % (options["size"], self.host_dut.hugepage_path)
        )
        # bind the hugepage backend to a NUMA node and prefault it
        self.__add_boot_line("-numa node,memdev=mem -mem-prealloc")
+
def add_vm_disk(self, **options):
    """
    file: /home/image/test.img  (required; returns False when absent)
    opt_format / opt_if / opt_index / opt_media: forwarded as the
    corresponding -drive sub-options.
    """
    if not options.get("file"):
        return False
    drive_line = "-drive file=%s" % options["file"]
    # NOTE: falsy values (e.g. opt_index=0) are skipped, matching the
    # original truthiness checks
    for key, qemu_key in (
        ("opt_format", "format"),
        ("opt_if", "if"),
        ("opt_index", "index"),
        ("opt_media", "media"),
    ):
        if options.get(key):
            drive_line += ",%s=%s" % (qemu_key, options[key])

    self.__add_boot_line(drive_line)
+
def add_vm_pflash(self, **options):
    """file: /home/image/flash0.img"""
    if "file" in options:
        self.__add_boot_line("-pflash %s" % options["file"])
+
def add_vm_start(self, **options):
    """Update VM start/login timeouts and prompts from the config."""
    # seconds to wait for the VM to come up before retrying once
    if "wait_seconds" in options:
        self.START_TIMEOUT = int(options["wait_seconds"])
    # seconds to wait for the login prompt
    if "login_timeout" in options:
        self.LOGIN_TIMEOUT = int(options["login_timeout"])
    if "login_prompt" in options:
        self.LOGIN_PROMPT = options["login_prompt"]
    if "password_prompt" in options:
        self.PASSWORD_PROMPT = options["password_prompt"]
+
def add_vm_login(self, **options):
    """
    user: login username of the virtual machine
    password: login password of the virtual machine
    """
    if "user" in options:
        self.username = options["user"]
    if "password" in options:
        self.password = options["password"]
+
def get_vm_login(self):
    """Return the (username, password) pair used to log into the guest."""
    return self.username, self.password
+
def set_vm_net(self, **options):
    """Append one net-device description to the 'net' parameter entry."""
    existing = self.find_option_index("net")
    if not existing:
        self.params.append({"net": [options]})
    else:
        self.params[existing]["net"].append(options)
+
def add_vm_net(self, **options):
    """
    Add a VM net device.
    type: [nic | user | tap | bridge | ...]
    opt_[vlan | fd | br | mac | ...]:
        the sub-options depend on the net type.
    """
    net_type = options.get("type")
    if net_type is None:
        return
    if net_type == "nic":
        self.__add_vm_net_nic(**options)
    if net_type == "user":
        self.__add_vm_net_user(**options)
    if net_type == "tap":
        self.__add_vm_net_tap(**options)

    # remember how the guest is reachable from the host
    if net_type == "user":
        self.net_type = "hostfwd"
    elif net_type in ["tap", "bridge"]:
        self.net_type = "bridge"
+
def add_vm_kernel(self, **options):
    """
    Boot the guest from an explicit kernel image.
    kernel_img: path to the kernel image (required; returns False if absent)
    console: console device for the kernel boot args
    baudrate: console baudrate, appended to the console arg
    root: root partition for the kernel boot args
    """
    print(options)
    kernel_img = options.get("kernel_img")
    if not kernel_img:
        return False
    self.__add_boot_line("-kernel %s" % kernel_img)

    kernel_args = ""
    if options.get("console"):
        kernel_args = "console=%s" % options["console"]
        if options.get("baudrate"):
            kernel_args += "," + options["baudrate"]
    if options.get("root"):
        kernel_args += " root=%s" % options["root"]
    if kernel_args:
        self.__add_boot_line('--append "%s"' % kernel_args)
+
def __add_vm_net_nic(self, **options):
    """
    type: nic
    opt_model: ["e1000" | "virtio" | "i82551" | ...], default "e1000"
    """
    model = options.get("opt_model") or "e1000"
    self.nic_model = model
    net_boot_line = "-device " + model

    # NICs are created in nic/user(-or-tap) pairs; both halves of a pair
    # share one netdev id (nttsip1, nttsip3, ...)
    netdev_id = self.nic_num
    if self.nic_num % 2 == 0:
        netdev_id = self.nic_num - 1
    self.nic_num = self.nic_num + 1
    net_boot_line += ",netdev=nttsip%d " % netdev_id

    if self.__string_has_multi_fields(net_boot_line, ","):
        self.__add_boot_line(net_boot_line)
+
def __add_vm_net_user(self, **options):
    """
    type: user
    opt_hostfwd: [tcp|udp]:[hostaddr]:hostport-[guestaddr]:guestport
    """
    # pair this user netdev with the matching NIC's netdev id
    netdev_id = self.nic_num
    if self.nic_num % 2 == 0:
        netdev_id = self.nic_num - 1
    self.nic_num = self.nic_num + 1
    net_boot_line = "-netdev user" + ",id=nttsip%d" % netdev_id

    if options.get("opt_hostfwd"):
        self.__check_net_user_opt_hostfwd(options["opt_hostfwd"])
        opt_hostfwd = options["opt_hostfwd"]
    else:
        # all fields empty: every default gets filled in by the parser
        opt_hostfwd = "::-:"
    net_boot_line += ",hostfwd=%s" % self.__parse_net_user_opt_hostfwd(opt_hostfwd)

    if self.__string_has_multi_fields(net_boot_line, ","):
        self.__add_boot_line(net_boot_line)
+
def __check_net_user_opt_hostfwd(self, opt_hostfwd):
    """Raise when the hostfwd value does not match the expected format."""
    ip_part = "\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
    hostfwd_re = r"(tcp|udp)?:(%s)?:\d+-(%s)?:\d+" % (ip_part, ip_part)
    if re.match(hostfwd_re, opt_hostfwd):
        return
    raise Exception(
        "Option opt_hostfwd format is not correct,\n"
        + "it is %s,\n " % opt_hostfwd
        + "it should be [tcp|udp]:[hostaddr]:hostport-"
        + "[guestaddr]:guestport.\n"
    )
+
def __parse_net_user_opt_hostfwd(self, opt_hostfwd):
    """
    Expand the 'hostfwd' option into a fully-qualified
    [tcp|udp]:hostaddr:hostport-guestaddr:guestport string, filling in
    defaults (tcp, the host DUT address, an allocated host port, guest
    port 22) for every empty field.  Side effects: records
    self.redir_port and, on first call, self.hostfwd_addr for later ssh
    access to the VM.
    """
    separator = ":"
    # field(option, index) -> the index-th colon-separated field
    field = lambda option, index, separator=":": option.split(separator)[index]

    # get the forward type
    fwd_type = field(opt_hostfwd, 0)
    if not fwd_type:
        fwd_type = "tcp"

    # get the host addr
    host_addr = field(opt_hostfwd, 1)
    if not host_addr:
        addr = str(self.host_dut.get_ip_address())
        host_addr = get_host_ip(addr)

    # get the host port in the option
    host_port = field(opt_hostfwd, 2).split("-")[0]

    # if no host assigned, just allocate it
    if not host_port:
        host_port = str(
            self.virt_pool.alloc_port(self.vm_name, port_type="connect")
        )

    self.redir_port = host_port

    # get the guest addr ("-" splits hostport from guestaddr)
    try:
        guest_addr = str(field(opt_hostfwd, 2).split("-")[1])
    except IndexError as e:
        guest_addr = ""

    # get the guest port in the option; default to ssh
    guest_port = str(field(opt_hostfwd, 3))
    if not guest_port:
        guest_port = "22"

    hostfwd_line = (
        fwd_type
        + separator
        + host_addr
        + separator
        + host_port
        + "-"
        + guest_addr
        + separator
        + guest_port
    )

    # init the redirect incoming TCP or UDP connections
    # just combine host address and host port, it is enough
    # for using ssh to connect with VM
    if not hasattr(self, "hostfwd_addr"):
        self.hostfwd_addr = host_addr + separator + host_port

    return hostfwd_line
+
def __add_vm_net_tap(self, **options):
    """
    type: tap
    opt_br: bridge to attach the tap to (default br0)
    opt_script: ifup script path (default self.QEMU_IFUP_PATH)
    opt_downscript: optional ifdown script path
    """
    # pair this tap netdev with the matching NIC's netdev id
    netdev_id = self.nic_num
    if self.nic_num % 2 == 0:
        netdev_id = self.nic_num - 1
    self.nic_num = self.nic_num + 1
    net_boot_line = "-netdev tap" + ",id=nttsip%d" % netdev_id

    # (re)generate the qemu ifup helper for the requested bridge
    bridge = options.get("opt_br") or self.DEFAULT_BRIDGE
    self.__generate_net_config_script(str(bridge))

    # network configure script path
    script_path = options.get("opt_script") or self.QEMU_IFUP_PATH
    net_boot_line += ",script=%s" % script_path

    # optional network teardown script path
    if options.get("opt_downscript"):
        net_boot_line += ",downscript=%s" % options["opt_downscript"]

    if self.__string_has_multi_fields(net_boot_line, ","):
        self.__add_boot_line(net_boot_line)
+
    def __generate_net_config_script(self, switch=DEFAULT_BRIDGE):
        """
        Generate a script for qemu emulator to build a tap device
        between host and guest.

        Renders the QEMU_IFUP template with the given bridge name, uploads
        it to the host DUT and installs it at self.QEMU_IFUP_PATH.
        """
        qemu_ifup = self.QEMU_IFUP % {"switch": switch}
        file_name = os.path.basename(self.QEMU_IFUP_PATH)
        tmp_file_path = "/tmp/%s" % file_name
        self.host_dut.create_file(qemu_ifup, tmp_file_path)
        # NOTE(review): the mv sources ~/<file> while create_file was given
        # /tmp/<file> -- presumably create_file drops the upload in the home
        # directory; confirm against its implementation
        self.host_session.send_expect(
            "mv -f ~/%s %s" % (file_name, self.QEMU_IFUP_PATH), "# "
        )
        self.host_session.send_expect("chmod +x %s" % self.QEMU_IFUP_PATH, "# ")
+
    def set_vm_device(self, driver="pci-assign", **opts):
        """
        Set VM device with specified driver.

        driver: qemu device driver name (default "pci-assign"); stored in
                opts and later consumed by add_vm_device.
        opts:   driver-specific sub-options (opt_host, opt_addr, ...).
        """
        opts["driver"] = driver
        index = self.find_option_index("device")
        if index:
            self.params[index]["device"].append(opts)
        else:
            self.params.append({"device": [opts]})

        # start up time may increase after add device
        self.START_TIMEOUT += 8
+
+ def add_vm_device(self, **options):
+ """
+ driver: [pci-assign | virtio-net-pci | ...]
+ opt_[host | addr | ...]: value
+ note:the sub-option will be decided according to the driver.
+ """
+ if "driver" in list(options.keys()) and options["driver"]:
+ if options["driver"] == "pci-assign":
+ self.__add_vm_pci_assign(**options)
+ elif options["driver"] == "virtio-net-pci":
+ self.__add_vm_virtio_net_pci(**options)
+ elif options["driver"] == "vhost-user":
+ self.__add_vm_virtio_user_pci(**options)
+ elif options["driver"] == "vhost-cuse":
+ self.__add_vm_virtio_cuse_pci(**options)
+ elif options["driver"] == "vfio-pci":
+ self.__add_vm_pci_vfio(**options)
+
    def __add_vm_pci_vfio(self, **options):
        """
        driver: vfio-pci
        opt_host: 08:00.0
        opt_addr: 00:00:00:00:01:02

        Appends a "-device vfio-pci" boot option for host device
        passthrough and records the device in pt_devices/assigned_pcis.
        """
        dev_boot_line = "-device vfio-pci"
        separator = ","
        if "opt_host" in list(options.keys()) and options["opt_host"]:
            dev_boot_line += separator + "host=%s" % options["opt_host"]
            # pt_<n> id lets __get_pci_mapping correlate host and guest pci
            dev_boot_line += separator + "id=pt_%d" % self.pt_idx
            self.pt_idx += 1
            self.pt_devices.append(options["opt_host"])
        if "opt_addr" in list(options.keys()) and options["opt_addr"]:
            dev_boot_line += separator + "addr=%s" % options["opt_addr"]
            self.assigned_pcis.append(options["opt_addr"])

        if self.__string_has_multi_fields(dev_boot_line, separator):
            self.__add_boot_line(dev_boot_line)
+
    def __add_vm_pci_assign(self, **options):
        """
        driver: pci-assign
        opt_host: 08:00.0
        opt_addr: 00:00:00:00:01:02

        Legacy KVM pci-assign variant of __add_vm_pci_vfio; same option
        handling and the same pt_devices/assigned_pcis bookkeeping.
        """
        dev_boot_line = "-device pci-assign"
        separator = ","
        if "opt_host" in list(options.keys()) and options["opt_host"]:
            dev_boot_line += separator + "host=%s" % options["opt_host"]
            dev_boot_line += separator + "id=pt_%d" % self.pt_idx
            self.pt_idx += 1
            self.pt_devices.append(options["opt_host"])
        if "opt_addr" in list(options.keys()) and options["opt_addr"]:
            dev_boot_line += separator + "addr=%s" % options["opt_addr"]
            self.assigned_pcis.append(options["opt_addr"])

        if self.__string_has_multi_fields(dev_boot_line, separator):
            self.__add_boot_line(dev_boot_line)
+
    def __add_vm_virtio_user_pci(self, **options):
        """
        driver virtio-net-pci
        opt_path: /tmp/vhost-net
        opt_mac: 00:00:20:00:00:00

        Builds a vhost-user backend: either a tap netdev (opt_script) or a
        unix-socket chardev (opt_path), then a vhost-user netdev referencing
        that chardev, and finally the virtio-net-pci front-end device.
        """
        separator = ","
        # chardev parameter
        netdev_id = "netdev%d" % self.netdev_idx
        if "opt_script" in list(options.keys()) and options["opt_script"]:
            if "opt_br" in list(options.keys()) and options["opt_br"]:
                bridge = options["opt_br"]
            else:
                bridge = self.DEFAULT_BRIDGE
            self.__generate_net_config_script(str(bridge))
            # NOTE(review): this tap boot line is never passed to
            # __add_boot_line, and the vhost-user netdev below references
            # char_id which is only bound in the opt_path branch -- confirm
            # the opt_script path is actually exercised
            dev_boot_line = "-netdev tap,id=%s,script=%s" % (
                netdev_id,
                options["opt_script"],
            )
            self.netdev_idx += 1
        elif "opt_path" in list(options.keys()) and options["opt_path"]:
            dev_boot_line = "-chardev socket"
            char_id = "char%d" % self.char_idx
            if "opt_server" in list(options.keys()) and options["opt_server"]:
                dev_boot_line += (
                    separator
                    + "id=%s" % char_id
                    + separator
                    + "path=%s" % options["opt_path"]
                    + separator
                    + "%s" % options["opt_server"]
                )
                self.char_idx += 1
                self.__add_boot_line(dev_boot_line)
            else:
                dev_boot_line += (
                    separator
                    + "id=%s" % char_id
                    + separator
                    + "path=%s" % options["opt_path"]
                )
                self.char_idx += 1
                self.__add_boot_line(dev_boot_line)
        # netdev parameter
        netdev_id = "netdev%d" % self.netdev_idx
        self.netdev_idx += 1
        if "opt_queue" in list(options.keys()) and options["opt_queue"]:
            queue_num = options["opt_queue"]
            dev_boot_line = (
                "-netdev type=vhost-user,id=%s,chardev=%s,vhostforce,queues=%s"
                % (netdev_id, char_id, queue_num)
            )
        else:
            dev_boot_line = (
                "-netdev type=vhost-user,id=%s,chardev=%s,vhostforce"
                % (netdev_id, char_id)
            )
        self.__add_boot_line(dev_boot_line)
        # device parameter: forward selected options to the virtio front-end
        opts = {"opt_netdev": "%s" % netdev_id}
        if "opt_mac" in list(options.keys()) and options["opt_mac"]:
            opts["opt_mac"] = options["opt_mac"]
        if "opt_settings" in list(options.keys()) and options["opt_settings"]:
            opts["opt_settings"] = options["opt_settings"]
        if "opt_legacy" in list(options.keys()) and options["opt_legacy"]:
            opts["opt_legacy"] = options["opt_legacy"]
        self.__add_vm_virtio_net_pci(**opts)
+
    def __add_vm_virtio_cuse_pci(self, **options):
        """
        driver virtio-net-pci
        opt_mac: 52:54:00:00:00:01

        Builds a vhost-cuse backend: a kernel-vhost tap netdev plus a
        virtio-net-pci front-end bound to it.
        """
        separator = ","
        dev_boot_line = "-netdev tap"
        # use the caller-provided tap id or auto-number one
        if "opt_tap" in list(options.keys()):
            cuse_id = options["opt_tap"]
        else:
            cuse_id = "vhost%d" % self.cuse_id
            self.cuse_id += 1
        dev_boot_line += (
            separator
            + "id=%s" % cuse_id
            + separator
            + "ifname=tap_%s" % cuse_id
            + separator
            + "vhost=on"
            + separator
            + "script=no"
        )
        self.__add_boot_line(dev_boot_line)
        # device parameter
        opts = {"opt_netdev": "%s" % cuse_id, "opt_id": "%s_net" % cuse_id}
        if "opt_mac" in list(options.keys()) and options["opt_mac"]:
            opts["opt_mac"] = options["opt_mac"]
        if "opt_settings" in list(options.keys()) and options["opt_settings"]:
            opts["opt_settings"] = options["opt_settings"]

        self.__add_vm_virtio_net_pci(**opts)
+
+ def __add_vm_virtio_net_pci(self, **options):
+ """
+ driver: virtio-net-pci
+ opt_netdev: mynet1
+ opt_id: net1
+ opt_mac: 00:00:00:00:01:03
+ opt_bus: pci.0
+ opt_addr: 0x3
+ opt_settings: csum=off,gso=off,guest_csum=off
+ """
+ dev_boot_line = "-device virtio-net-pci"
+ separator = ","
+ if "opt_netdev" in list(options.keys()) and options["opt_netdev"]:
+ dev_boot_line += separator + "netdev=%s" % options["opt_netdev"]
+ if "opt_id" in list(options.keys()) and options["opt_id"]:
+ dev_boot_line += separator + "id=%s" % options["opt_id"]
+ if "opt_mac" in list(options.keys()) and options["opt_mac"]:
+ dev_boot_line += separator + "mac=%s" % options["opt_mac"]
+ if "opt_bus" in list(options.keys()) and options["opt_bus"]:
+ dev_boot_line += separator + "bus=%s" % options["opt_bus"]
+ if "opt_addr" in list(options.keys()) and options["opt_addr"]:
+ dev_boot_line += separator + "addr=%s" % options["opt_addr"]
+ if "opt_legacy" in list(options.keys()) and options["opt_legacy"]:
+ dev_boot_line += separator + "disable-modern=%s" % options["opt_legacy"]
+ if "opt_settings" in list(options.keys()) and options["opt_settings"]:
+ dev_boot_line += separator + "%s" % options["opt_settings"]
+
+ if self.__string_has_multi_fields(dev_boot_line, separator):
+ self.__add_boot_line(dev_boot_line)
+
+ def __string_has_multi_fields(self, string, separator, field_num=2):
+ """
+ Check if string has multiple fields which are splitted with
+ specified separator.
+ """
+ fields = string.split(separator)
+ number = 0
+ for field in fields:
+ if field:
+ number += 1
+ if number >= field_num:
+ return True
+ else:
+ return False
+
+ def set_vm_monitor(self):
+ """
+ Set VM boot option to enable qemu monitor.
+ """
+ index = self.find_option_index("monitor")
+ if index:
+ self.params[index] = {
+ "monitor": [{"path": "/tmp/%s_monitor.sock" % (self.vm_name)}]
+ }
+ else:
+ self.params.append(
+ {"monitor": [{"path": "/tmp/%s_monitor.sock" % (self.vm_name)}]}
+ )
+
    def add_vm_monitor(self, **options):
        """
        path: if adding monitor to vm, need to specify unix socket path

        Records the socket path in self.monitor_sock_path (None when no
        path is given) for later use by the qemu monitor session.
        """
        if "path" in list(options.keys()):
            monitor_boot_line = "-monitor unix:%s,server,nowait" % options["path"]
            self.__add_boot_line(monitor_boot_line)
            self.monitor_sock_path = options["path"]
        else:
            self.monitor_sock_path = None
+
+ def add_vm_migration(self, **options):
+ """
+ enable: yes
+ port: tcp port for live migration
+ """
+ migrate_cmd = "-incoming tcp::%(migrate_port)s"
+
+ if "enable" in list(options.keys()):
+ if options["enable"] == "yes":
+ if "port" in list(options.keys()):
+ self.migrate_port = options["port"]
+ else:
+ self.migrate_port = str(
+ self.virt_pool.alloc_port(self.vm_name), port_type="migrate"
+ )
+ migrate_boot_line = migrate_cmd % {"migrate_port": self.migrate_port}
+ self.__add_boot_line(migrate_boot_line)
+
+ def set_vm_control(self, **options):
+ """
+ Set control session options
+ """
+ if "type" in list(options.keys()):
+ self.control_type = options["type"]
+ else:
+ self.control_type = "telnet"
+
+ index = self.find_option_index("control")
+ if index:
+ self.params[index] = {"control": [{"type": self.control_type}]}
+ else:
+ self.params.append({"control": [{"type": self.control_type}]})
+
    def add_vm_control(self, **options):
        """
        Add control method for VM management
        type : 'telnet' | 'socket' | 'qga'

        telnet: qemu serial console exposed on a tcp port (allocated from
                the pool when not given).
        socket: qemu serial console on a unix socket under /tmp.
        qga:    qemu guest agent over a virtio-serial chardev.
        """
        separator = " "

        self.control_type = options["type"]
        if self.control_type == "telnet":
            if "port" in options:
                self.serial_port = int(options["port"])
            else:
                self.serial_port = self.virt_pool.alloc_port(
                    self.vm_name, port_type="serial"
                )
            control_boot_line = "-serial telnet::%d,server,nowait" % self.serial_port
        elif self.control_type == "socket":
            self.serial_path = "/tmp/%s_serial.sock" % self.vm_name
            control_boot_line = "-serial unix:%s,server,nowait" % self.serial_path
        elif self.control_type == "qga":
            qga_dev_id = "%(vm_name)s_qga0" % {"vm_name": self.vm_name}
            self.qga_socket_path = "/tmp/%(vm_name)s_qga0.sock" % {
                "vm_name": self.vm_name
            }
            # command prefix used by connect_qga_port to talk to the agent
            self.qga_cmd_head = (
                "~/QMP/qemu-ga-client --address=%s " % self.qga_socket_path
            )
            qga_boot_block = (
                "-chardev socket,path=%(SOCK_PATH)s,server,nowait,id=%(ID)s"
                + separator
                + "-device virtio-serial"
                + separator
                + "-device virtserialport,chardev=%(ID)s,name=%(DEV_NAME)s"
            )
            control_boot_line = qga_boot_block % {
                "SOCK_PATH": self.qga_socket_path,
                "DEV_NAME": QGA_DEV_NAME,
                "ID": qga_dev_id,
            }

        self.__add_boot_line(control_boot_line)
+
    def connect_serial_port(self, name=""):
        """
        Connect to serial port and return connected session for usage
        if connected failed will return None

        Starts socat against the unix serial socket on first use, probes
        the console, logs in when a login prompt is seen, and returns the
        control session (or True when already inside the VM shell).
        Returns False after forcing the connection closed on any error.
        """
        shell_reg = r"(.*)# "
        try:
            if getattr(self, "control_session", None) is None:
                self.control_session = self.host_session

                self.control_session.send_command("socat %s STDIO" % self.serial_path)

            # login message not output if timeout is too small
            out = (
                self.control_session.send_command("", timeout=5)
                .replace("\r", "")
                .replace("\n", "")
            )

            if len(out) == 0:
                raise StartVMFailedException(
                    "Can't get output from [%s:%s]"
                    % (self.host_dut.crb["My IP"], self.vm_name)
                )

            m = re.match(shell_reg, out)
            if m:
                # dmidecode output contain #, so use other matched string
                out = self.control_session.send_expect(
                    "dmidecode -t system",
                    "Product Name",
                    timeout=self.OPERATION_TIMEOUT,
                )
                # cleanup previous output
                self.control_session.get_session_before(timeout=0.1)

                # if still on host, need reconnect
                if "QEMU" not in out:
                    raise StartVMFailedException("Not real login [%s]" % self.vm_name)
                else:
                    # has enter into VM shell
                    return True

            # login into Redhat os, not sure can work on all distributions
            if self.LOGIN_PROMPT not in out:
                raise StartVMFailedException("Can't login [%s] now!!!" % self.vm_name)
            else:
                self.control_session.send_expect(
                    "%s" % self.username,
                    self.PASSWORD_PROMPT,
                    timeout=self.LOGIN_TIMEOUT,
                )
                # system maybe busy here, enlarge timeout equal to login timeout
                self.control_session.send_expect(
                    "%s" % self.password, "#", timeout=self.LOGIN_TIMEOUT
                )
                return self.control_session
        except Exception as e:
            # when exception happened, force close serial connection and reconnect
            print(
                RED(
                    "[%s:%s] exception [%s] happened"
                    % (self.host_dut.crb["My IP"], self.vm_name, str(e))
                )
            )
            self.close_control_session(dut_id=self.host_dut.dut_id)
            return False
+
    def connect_telnet_port(self, name=""):
        """
        Connect to serial port and return connected session for usage
        if connected failed will return None

        Telnet variant of connect_serial_port: dials the qemu serial
        console on localhost:self.serial_port, probes the console and logs
        in when a login banner is seen.  Returns True on success, False on
        failure (after force-closing the connection).
        """
        shell_reg = r"(.*)# "
        # NOTE(review): scan_cmd is never used in this method
        scan_cmd = "lsof -i:%d | grep telnet | awk '{print $2}'" % self.serial_port

        try:
            # assume serial is not connect
            if getattr(self, "control_session", None) is None:
                self.control_session = self.host_session

                self.control_session.send_expect(
                    "telnet localhost %d" % self.serial_port,
                    "Connected to localhost",
                    timeout=self.OPERATION_TIMEOUT,
                )

            # output will be empty if timeout too small
            out = (
                self.control_session.send_command("", timeout=5)
                .replace("\r", "")
                .replace("\n", "")
            )

            # if no output from serial port, either connection close or system hang
            if len(out) == 0:
                raise StartVMFailedException(
                    "Can't get output from [%s]" % self.vm_name
                )

            # if enter into shell
            m = re.match(shell_reg, out)
            if m:
                # dmidecode output contain #, so use other matched string
                out = self.control_session.send_expect(
                    "dmidecode -t system",
                    "Product Name",
                    timeout=self.OPERATION_TIMEOUT,
                )
                # cleanup previous output
                self.control_session.get_session_before(timeout=0.1)

                # if still on host, need reconnect
                if "QEMU" not in out:
                    raise StartVMFailedException("Not real login [%s]" % self.vm_name)
                else:
                    # has enter into VM shell
                    return True

            # login into Redhat os, not sure can work on all distributions
            if ("x86_64 on an x86_64" not in out) and (self.LOGIN_PROMPT not in out):
                print(
                    RED(
                        "[%s:%s] not ready for login"
                        % (self.host_dut.crb["My IP"], self.vm_name)
                    )
                )
                return False
            else:
                self.control_session.send_expect(
                    "%s" % self.username, "Password:", timeout=self.LOGIN_TIMEOUT
                )
                self.control_session.send_expect(
                    "%s" % self.password, "#", timeout=self.LOGIN_TIMEOUT
                )
                return True
        except Exception as e:
            # when exception happened, force close serial connection and reconnect
            print(
                RED(
                    "[%s:%s] exception [%s] happened"
                    % (self.host_dut.crb["My IP"], self.vm_name, str(e))
                )
            )
            self.close_control_session(dut_id=self.host_dut.dut_id)
            return False
+
    def connect_qga_port(self, name=""):
        """
        QGA control session just share with host session

        Pings the qemu guest agent through qga_cmd_head; returns True when
        the agent answers within START_TIMEOUT, False after force-closing
        the control session on any error.
        """
        try:
            # assume serial is not connect
            if getattr(self, "control_session", None) is None:
                self.control_session = self.host_session

            self.control_session.send_expect(
                "%s ping %d" % (self.qga_cmd_head, self.START_TIMEOUT),
                "#",
                timeout=self.START_TIMEOUT,
            )

            # here VM has been start and qga also ready
            return True
        except Exception as e:
            # when exception happened, force close qga process and reconnect
            print(
                RED(
                    "[%s:%s] QGA not ready" % (self.host_dut.crb["My IP"], self.vm_name)
                )
            )
            self.close_control_session(dut_id=self.host_dut.dut_id)
            return False
+
    def add_vm_vnc(self, **options):
        """
        Add VM display option

        disable: "True" boots headless (-display none); otherwise a VNC
        display is opened on displayNum, allocating one from the pool when
        not given.
        """
        if "disable" in list(options.keys()) and options["disable"] == "True":
            vnc_boot_line = "-display none"
        else:
            if "displayNum" in list(options.keys()) and options["displayNum"]:
                display_num = options["displayNum"]
            else:
                # no fixed display requested; take one from the shared pool
                display_num = self.virt_pool.alloc_port(
                    self.vm_name, port_type="display"
                )

            vnc_boot_line = "-vnc :%d" % int(display_num)

        self.__add_boot_line(vnc_boot_line)
+
    def set_vm_vnc(self, **options):
        """
        Set VM display options

        Stores the vnc parameter entry consumed later by add_vm_vnc:
        disable requested, a fixed displayNum, or deferred allocation.
        """
        if "disable" in list(options.keys()):
            vnc_option = [{"disable": "True"}]
        else:
            if "displayNum" in list(options.keys()):
                vnc_option = [{"displayNum": options["displayNum"]}]
            else:
                # will allocate vnc display later
                vnc_option = [{"disable": "False"}]

        index = self.find_option_index("vnc")
        if index:
            self.params[index] = {"vnc": vnc_option}
        else:
            self.params.append({"vnc": vnc_option})
+
+ def set_vm_daemon(self, enable="yes"):
+ """
+ Set VM daemon option.
+ """
+ index = self.find_option_index("daemon")
+ if index:
+ self.params[index] = {"daemon": [{"enable": "%s" % enable}]}
+ else:
+ self.params.append({"daemon": [{"enable": "%s" % enable}]})
+
+ def add_vm_daemon(self, **options):
+ """
+ enable: 'yes'
+ note:
+ By default VM will start with the daemonize status.
+ Not support starting it on the stdin now.
+ """
+ if "daemon" in list(options.keys()) and options["enable"] == "no":
+ pass
+ else:
+ daemon_boot_line = "-daemonize"
+ self.__add_boot_line(daemon_boot_line)
+
+ def add_vm_usercmd(self, **options):
+ """
+ usercmd: user self defined command line.
+ This command will be add into qemu boot command.
+ """
+ if "cmd" in list(options.keys()):
+ cmd = options["cmd"]
+ self.__add_boot_line(cmd)
+
    def add_vm_crypto(self, **options):
        """
        Add VM crypto options

        enable: "yes" to emit crypto devices; opt_num (default 1) controls
        how many chardev/cryptodev/virtio-crypto-pci triples are added,
        each backed by a per-VM unix socket under /tmp.
        """
        separator = " "

        if "enable" in list(options.keys()) and options["enable"] == "yes":
            if "opt_num" in list(options.keys()):
                opt_num = int(options["opt_num"])
            else:
                opt_num = 1

            for id in range(opt_num):
                cryptodev_id = "%(vm_name)s_crypto%(id)s" % {
                    "vm_name": self.vm_name,
                    "id": id,
                }
                cryptodev_soch_path = "/tmp/%(vm_name)s_crypto%(id)s.sock" % {
                    "vm_name": self.vm_name,
                    "id": id,
                }

                # chardev -> vhost-user cryptodev object -> pci device,
                # all sharing the same numeric suffix
                crypto_boot_block = (
                    "-chardev socket,path=%(SOCK_PATH)s,id=%(ID)s"
                    + separator
                    + "-object cryptodev-vhost-user,id=cryptodev%(id)s,chardev=%(ID)s"
                    + separator
                    + "-device virtio-crypto-pci,id=crypto%(id)s,cryptodev=cryptodev%(id)s"
                )
                crypto_boot_line = crypto_boot_block % {
                    "SOCK_PATH": cryptodev_soch_path,
                    "ID": cryptodev_id,
                    "id": id,
                }
                self.__add_boot_line(crypto_boot_line)
+
    def _check_vm_status(self):
        """
        Check and restart QGA if not ready, wait for network ready

        Blocks until the control channel answers and the VM network is
        reachable; either step raises StartVMFailedException on timeout.
        """
        self.__wait_vm_ready()

        self.__wait_vmnet_ready()
+
    def _attach_vm(self):
        """
        Attach VM
        Collected information : serial/monitor/qga sock file
                              : hostfwd address

        Re-discovers a running qemu process from /proc/<pid>/cmdline
        (arguments are NUL-separated there, hence the \\x00 in the regexes)
        and reconstructs serial, monitor, hostfwd and migration settings.
        """
        self.am_attached = True

        if not self._query_pid():
            raise StartVMFailedException("Can't strip process pid!!!")

        cmdline = self.host_session.send_expect("cat /proc/%d/cmdline" % self.pid, "# ")
        qemu_boot_line = cmdline.replace("\x00", " ")
        self.qemu_boot_line = qemu_boot_line.split(" ", 1)[1]
        self.qemu_emulator = qemu_boot_line.split(" ", 1)[0]

        serial_reg = ".*serial\x00unix:(.*?),"
        telnet_reg = ".*serial\x00telnet::(\d+),"
        monitor_reg = ".*monitor\x00unix:(.*?),"
        hostfwd_reg = ".*hostfwd=tcp:(.*):(\d+)-:"
        migrate_reg = ".*incoming\x00tcp::(\d+)"

        # support both telnet and unix domain socket serial device
        m = re.match(serial_reg, cmdline)
        if not m:
            m1 = re.match(telnet_reg, cmdline)
            if not m1:
                raise StartVMFailedException("No serial sock available!!!")
            else:
                self.serial_port = int(m1.group(1))
                self.control_type = "telnet"
        else:
            self.serial_path = m.group(1)
            self.control_type = "socket"

        m = re.match(monitor_reg, cmdline)
        if not m:
            raise StartVMFailedException("No monitor sock available!!!")
        self.monitor_sock_path = m.group(1)

        m = re.match(hostfwd_reg, cmdline)
        if not m:
            raise StartVMFailedException("No host fwd config available!!!")

        self.net_type = "hostfwd"
        self.host_port = m.group(2)
        self.hostfwd_addr = m.group(1) + ":" + self.host_port

        # record start time, need call before check_vm_status
        self.start_time = time.time()

        try:
            self.update_status()
        except:
            self.host_logger.error("Can't query vm status!!!")

        # NOTE(review): identity comparison with ST_PAUSE -- works only if
        # the constant is an interned/singleton value; confirm
        if self.vm_status is not ST_PAUSE:
            self._check_vm_status()
        else:
            # paused VM is a live-migration target; recover its port
            m = re.match(migrate_reg, cmdline)
            if not m:
                raise StartVMFailedException("No migrate port available!!!")

            self.migrate_port = int(m.group(1))
+
    def _start_vm(self):
        """
        Start VM.

        Allocates passthrough devices, boots qemu, maps guest pci devices
        and waits for the VM (unless it is paused awaiting migration).
        """
        self.__alloc_assigned_pcis()

        qemu_boot_line = self.generate_qemu_boot_line()

        self.__send_qemu_cmd(qemu_boot_line, dut_id=self.host_dut.dut_id)

        self.__get_pci_mapping()

        # query status
        self.update_status()

        # sleep few seconds for bios/grub
        time.sleep(10)

        # when vm is waiting for migration, can't ping
        if self.vm_status is not ST_PAUSE:
            self.__wait_vm_ready()

            self.__wait_vmnet_ready()
+
    # Start VM using the qemu command
    # lock critical action like start qemu
    @parallel_lock(num=4)
    def __send_qemu_cmd(self, qemu_boot_line, dut_id):
        """
        Run the qemu boot line on the host and verify it launched.

        Raises StartVMFailedException when the shell reports a non-zero
        exit status; dut_id is consumed by the parallel_lock decorator.
        """
        # add more time for qemu start will be slow when system is busy
        ret = self.host_session.send_expect(
            qemu_boot_line, "# ", verify=True, timeout=30
        )

        # record start time
        self.start_time = time.time()

        # wait for qemu process ready
        time.sleep(2)
        if type(ret) is int and ret != 0:
            raise StartVMFailedException("Start VM failed!!!")
+
    def _quick_start_vm(self):
        """
        Boot the VM like _start_vm but skip the network-ready wait,
        returning right after the bios/grub grace period.
        """
        self.__alloc_assigned_pcis()

        qemu_boot_line = self.generate_qemu_boot_line()

        self.__send_qemu_cmd(qemu_boot_line, dut_id=self.host_dut.dut_id)

        self.__get_pci_mapping()

        # query status
        self.update_status()

        # sleep few seconds for bios and grub
        time.sleep(10)
+
    def __ping_vm(self):
        """
        Poll the VM control channel until it answers or START_TIMEOUT
        (measured from self.start_time) elapses.

        Returns True when the "ping" control command succeeded.
        """
        logged_in = False
        cur_time = time.time()
        time_diff = cur_time - self.start_time
        try_times = 0
        while time_diff < self.START_TIMEOUT:
            if self.control_command("ping") == "Success":
                logged_in = True
                break

            # update time consume
            cur_time = time.time()
            time_diff = cur_time - self.start_time

            self.host_logger.warning(
                "Can't login [%s] on [%s], retry %d times!!!"
                % (self.vm_name, self.host_dut.crb["My IP"], try_times + 1)
            )
            time.sleep(self.OPERATION_TIMEOUT)
            try_times += 1
            continue

        return logged_in
+
    def __wait_vm_ready(self):
        """
        Wait for the VM control channel; on first failure stop and restart
        the VM once (self.restarted guards the single retry), otherwise
        raise StartVMFailedException.
        """
        logged_in = self.__ping_vm()
        if not logged_in:
            if not self.restarted:
                # make sure serial session has been quit
                self.close_control_session(dut_id=self.host_dut.dut_id)
                self.vm_status = ST_NOTSTART
                self._stop_vm()
                self.restarted = True
                self._start_vm()
            else:
                raise StartVMFailedException(
                    "Not response in %d seconds!!!" % self.START_TIMEOUT
                )
+
+ def start_migration(self, remote_ip, remote_port):
+ """
+ Send migration command to host and check whether start migration
+ """
+ # send migration command
+ migration_port = "tcp:%(IP)s:%(PORT)s" % {"IP": remote_ip, "PORT": remote_port}
+
+ self.__monitor_session("migrate", "-d", migration_port)
+ time.sleep(2)
+ out = self.__monitor_session("info", "migrate")
+ if "Migration status: active" in out:
+ return True
+ else:
+ return False
+
    def wait_migration_done(self):
        """
        Wait for migration done. If not finished after three minutes
        will raise exception.
        """
        # wait for migration done: 30 polls x 6s = 180s budget
        count = 30
        while count:
            out = self.__monitor_session("info", "migrate")
            if "completed" in out:
                self.host_logger.info("%s" % out)
                # after migration done, status is pause
                self.vm_status = ST_PAUSE
                return True

            time.sleep(6)
            count -= 1

        raise StartVMFailedException(
            "Virtual machine can not finished in 180 seconds!!!"
        )
+
+ def generate_qemu_boot_line(self):
+ """
+ Generate the whole QEMU boot line.
+ """
+ if self.vcpus_pinned_to_vm:
+ vcpus = self.vcpus_pinned_to_vm.replace(" ", ",")
+ qemu_boot_line = (
+ "taskset -c %s " % vcpus
+ + self.qemu_emulator
+ + " "
+ + self.qemu_boot_line
+ )
+ else:
+ qemu_boot_line = self.qemu_emulator + " " + self.qemu_boot_line
+
+ return qemu_boot_line
+
+ def __get_vmnet_pci(self):
+ """
+ Get PCI ID of access net interface on VM
+ """
+ if not getattr(self, "nic_model", None) is None:
+ pci_reg = r"^.*Bus(\s+)(\d+), device(\s+)(\d+), function (\d+)"
+ dev_reg = r"^.*Ethernet controller:.*([a-fA-F0-9]{4}:[a-fA-F0-9]{4})"
+ if self.nic_model == "e1000":
+ dev_id = "8086:100e"
+ elif self.nic_model == "i82551":
+ dev_id = "8086:1209"
+ elif self.nic_model == "virtio":
+ dev_id = "1af4:1000"
+ out = self.__monitor_session("info", "pci")
+ lines = out.split("\r\n")
+ for line in lines:
+ m = re.match(pci_reg, line)
+ o = re.match(dev_reg, line)
+ if m:
+ pci = "%02d:%02d.%d" % (
+ int(m.group(2)),
+ int(m.group(4)),
+ int(m.group(5)),
+ )
+ if o:
+ if o.group(1) == dev_id:
+ self.net_nic_pci = pci
+
    def __wait_vmnet_ready(self):
        """
        wait for 120 seconds for vm net ready
        10.0.2.* is the default ip address allocated by qemu

        Polls the "network" control command until success or START_TIMEOUT
        elapses; on success drops the stale ssh host key for the hostfwd
        endpoint so later ssh logins do not fail.
        """
        cur_time = time.time()
        time_diff = cur_time - self.start_time
        try_times = 0
        network_ready = False
        while time_diff < self.START_TIMEOUT:
            if getattr(self, "net_nic_pci", None) is None:
                self.__get_vmnet_pci()
            if self.control_command("network") == "Success":
                # ssh-keygen -R expects "[host]:port" for non-default ports
                pos = self.hostfwd_addr.find(":")
                ssh_key = "[" + self.hostfwd_addr[:pos] + "]" + self.hostfwd_addr[pos:]
                os.system("ssh-keygen -R %s" % ssh_key)
                network_ready = True
                break

            # update time consume
            cur_time = time.time()
            time_diff = cur_time - self.start_time

            self.host_logger.warning(
                "[%s] on [%s] network not ready, retry %d times!!!"
                % (self.vm_name, self.host_dut.crb["My IP"], try_times + 1)
            )
            time.sleep(self.OPERATION_TIMEOUT)
            try_times += 1
            continue

        if network_ready:
            return True
        else:
            raise StartVMFailedException("Virtual machine control net not ready!!!")
+
+ def __alloc_vcpus(self):
+ """
+ Allocate virtual CPUs for VM.
+ """
+ req_cpus = self.vcpus_pinned_to_vm.split()
+ cpus = self.virt_pool.alloc_cpu(vm=self.vm_name, corelist=req_cpus)
+
+ if len(req_cpus) != len(cpus):
+ self.host_logger.warning(
+ "VCPUs not enough, required [ %s ], just [ %s ]" % (req_cpus, cpus)
+ )
+ raise Exception("No enough required vcpus!!!")
+
+ vcpus_pinned_to_vm = ""
+ for cpu in cpus:
+ vcpus_pinned_to_vm += "," + cpu
+ vcpus_pinned_to_vm = vcpus_pinned_to_vm.lstrip(",")
+
+ return vcpus_pinned_to_vm
+
    def __alloc_assigned_pcis(self):
        """
        Record the PCI device info
        Struct: {dev pci: {'is_vf': [True | False],
                           'pf_pci': pci}}
        example:
            {'08:10.0':{'is_vf':True, 'pf_pci': 08:00.0}}

        For each requested device: VFs are reserved from their PF's pool
        and rebound to pci-stub; PFs are reserved whole, after verifying
        none of their VFs was already handed out.
        """
        assigned_pcis_info = {}
        for pci in self.assigned_pcis:
            assigned_pcis_info[pci] = {}
            if self.__is_vf_pci(pci):
                assigned_pcis_info[pci]["is_vf"] = True
                pf_pci = self.__map_vf_to_pf(pci)
                assigned_pcis_info[pci]["pf_pci"] = pf_pci
                if self.virt_pool.alloc_vf_from_pf(
                    vm=self.vm_name, pf_pci=pf_pci, *[pci]
                ):
                    # hand the VF to the stub driver so qemu can claim it
                    port = self.__get_vf_port(pci)
                    port.unbind_driver()
                    port.bind_driver("pci-stub")
            else:
                # check that if any VF of specified PF has been
                # used, raise exception
                vf_pci = self.__vf_has_been_assigned(pci, **assigned_pcis_info)
                if vf_pci:
                    raise Exception(
                        "Error: A VF [%s] generated by PF [%s] has " % (vf_pci, pci)
                        + "been assigned to VM, so this PF can not be "
                        + "assigned to VM again!"
                    )
                # get the port instance of PF
                port = self.__get_net_device_by_pci(pci)

                if self.virt_pool.alloc_pf(vm=self.vm_name, *[pci]):
                    port.unbind_driver()
+
+ def __is_vf_pci(self, dev_pci):
+ """
+ Check if the specified PCI dev is a VF.
+ """
+ for port_info in self.host_dut.ports_info:
+ if "sriov_vfs_pci" in list(port_info.keys()):
+ if dev_pci in port_info["sriov_vfs_pci"]:
+ return True
+ return False
+
+ def __map_vf_to_pf(self, dev_pci):
+ """
+ Map the specified VF to PF.
+ """
+ for port_info in self.host_dut.ports_info:
+ if "sriov_vfs_pci" in list(port_info.keys()):
+ if dev_pci in port_info["sriov_vfs_pci"]:
+ return port_info["pci"]
+ return None
+
+ def __get_vf_port(self, dev_pci):
+ """
+ Get the NetDevice instance of specified VF.
+ """
+ for port_info in self.host_dut.ports_info:
+ if "vfs_port" in list(port_info.keys()):
+ for port in port_info["vfs_port"]:
+ if dev_pci == port.pci:
+ return port
+ return None
+
+ def __vf_has_been_assigned(self, pf_pci, **assigned_pcis_info):
+ """
+ Check if the specified VF has been used.
+ """
+ for pci in list(assigned_pcis_info.keys()):
+ if (
+ assigned_pcis_info[pci]["is_vf"]
+ and assigned_pcis_info[pci]["pf_pci"] == pf_pci
+ ):
+ return pci
+ return False
+
+ def __get_net_device_by_pci(self, net_device_pci):
+ """
+ Get NetDevice instance by the specified PCI bus number.
+ """
+ port_info = self.host_dut.get_port_info(net_device_pci)
+ return port_info["port"]
+
+ def get_vm_ip(self):
+ """
+ Get VM IP.
+ """
+ get_vm_ip = getattr(self, "get_vm_ip_%s" % self.net_type)
+ return get_vm_ip()
+
+ def get_vm_ip_hostfwd(self):
+ """
+ Get IP which VM is connected by hostfwd.
+ """
+ return self.hostfwd_addr
+
    def get_vm_ip_bridge(self):
        """
        Get IP which VM is connected by bridge.

        Reads the guest's ifconfig through the control channel and returns
        the first non-loopback address reachable from the host, or "".
        """
        # "ping" control command doubles as a liveness probe; give the
        # guest extra time when it is not answering yet
        out = self.control_command("ping")
        if not out:
            time.sleep(10)
        out = self.control_command("ifconfig")
        ips = re.findall(r"inet (\d+\.\d+\.\d+\.\d+)", out)

        if "127.0.0.1" in ips:
            ips.remove("127.0.0.1")

        num = 3
        for ip in ips:
            # verify reachability from the host before trusting the address
            out = self.host_session.send_expect("ping -c %d %s" % (num, ip), "# ")
            if "0% packet loss" in out:
                return ip
        return ""
+
+ def __get_pci_mapping(self):
+ devices = self.__strip_guest_pci()
+ for hostpci in self.pt_devices:
+ index = self.pt_devices.index(hostpci)
+ pt_id = "pt_%d" % index
+ pci_map = {}
+ for device in devices:
+ if device["id"] == pt_id:
+ pci_map["hostpci"] = hostpci
+ pci_map["guestpci"] = device["pci"]
+ self.pci_maps.append(pci_map)
+
+ def get_pci_mappings(self):
+ """
+ Return guest and host pci devices mapping structure
+ """
+ return self.pci_maps
+
    def __monitor_session(self, command, *args):
        """
        Connect the qemu monitor session, send command and return output message.

        Dials the monitor unix socket with nc, sends "<command> <args...>"
        and returns the monitor's output; returns None when the VM has no
        monitor socket configured.
        """
        if not self.monitor_sock_path:
            self.host_logger.info(
                "No monitor between on host [ %s ] for guest [ %s ]"
                % (self.host_dut.NAME, self.vm_name)
            )
            return None

        self.host_session.send_expect("nc -U %s" % self.monitor_sock_path, "(qemu)")

        cmd = command
        for arg in args:
            cmd += " " + str(arg)

        # after quit command, qemu will exit
        if "quit" in cmd:
            self.host_session.send_command("%s" % cmd)
            out = self.host_session.send_expect(" ", "#")
        else:
            out = self.host_session.send_expect("%s" % cmd, "(qemu)", 30)
            # ^C drops the nc connection and returns to the host shell
            self.host_session.send_expect("^C", "# ")
        return out
+
    def update_status(self):
        """
        Query and update VM status

        Sets self.vm_status from the monitor's "info status" output and
        registers the VM's pid with the host DUT for later cleanup.
        """
        out = self.__monitor_session("info", "status")
        self.host_logger.warning("Virtual machine status: %s" % out)

        # NOTE(review): __monitor_session returns None when no monitor
        # socket is configured; the membership tests below would then
        # raise TypeError -- confirm a monitor is always present here
        if "paused" in out:
            self.vm_status = ST_PAUSE
        elif "running" in out:
            self.vm_status = ST_RUNNING
        else:
            self.vm_status = ST_UNKNOWN

        info = self.host_session.send_expect("cat %s" % self.__pid_file, "# ")
        try:
            pid = int(info.split()[0])
            # save pid into dut structure
            self.host_dut.virt_pids.append(pid)
        except:
            self.host_logger.info("Failed to capture pid!!!")
+
+ def _query_pid(self):
+ info = self.host_session.send_expect("cat %s" % self.__pid_file, "# ")
+ try:
+ # sometimes saw to lines in pid file
+ pid = int(info.splitlines()[0])
+ # save pid into dut structure
+ self.pid = pid
+ return True
+ except:
+ return False
+
+ def __strip_guest_pci(self):
+ """
+ Strip all pci-passthrough device information, based on qemu monitor
+ """
+ pci_reg = r"^.*Bus(\s+)(\d+), device(\s+)(\d+), function (\d+)"
+ id_reg = r"^.*id \"(.*)\""
+
+ pcis = []
+ out = self.__monitor_session("info", "pci")
+
+ if out is None:
+ return pcis
+
+ lines = out.split("\r\n")
+
+ for line in lines:
+ m = re.match(pci_reg, line)
+ n = re.match(id_reg, line)
+ if m:
+ pci = "%02d:%02d.%d" % (
+ int(m.group(2)),
+ int(m.group(4)),
+ int(m.group(5)),
+ )
+ if n:
+ dev_id = n.group(1)
+ if dev_id != "":
+ pt_dev = {}
+ pt_dev["pci"] = pci
+ pt_dev["id"] = dev_id
+ pcis.append(pt_dev)
+
+ return pcis
+
+ def __strip_guest_core(self):
+ """
+ Strip all lcore-thread binding information
+ Return array will be [thread0, thread1, ...]
+ """
+ cores = []
+ # CPU #0: pc=0xffffffff8104c416 (halted) thread_id=40677
+ core_reg = r"^.*CPU #(\d+): (.*) thread_id=(\d+)"
+ out = self.__monitor_session("info", "cpus")
+
+ if out is None:
+ return cores
+
+ lines = out.split("\r\n")
+ for line in lines:
+ m = re.match(core_reg, line)
+ if m:
+ cores.append(int(m.group(3)))
+
+ return cores
+
+ def quit_control_session(self):
+ """
+ Quit from serial session gracefully
+ """
+ if self.control_type == "socket":
+ self.control_session.send_expect("^C", "# ")
+ elif self.control_type == "telnet":
+ self.control_session.send_command("^]")
+ self.control_session.send_command("quit")
+ # nothing need to do for qga session
+ self.control_session = None
+
    @parallel_lock()
    def close_control_session(self, dut_id):
        """
        Force kill serial connection from DUT when exception happened

        Scans the host process table for the helper process backing the
        control session (socat / telnet / qemu-ga client) and kills it,
        then drops the session object. All kills are best-effort.
        """
        # return control_session to host_session
        if self.control_type == "socket":
            scan_cmd = (
                "ps -e -o pid,cmd |grep 'socat %s STDIO' |grep -v grep"
                % self.serial_path
            )
            out = self.host_dut.send_expect(scan_cmd, "#")
            proc_info = out.strip().split()
            try:
                pid = int(proc_info[0])
                self.host_dut.send_expect("kill %d" % pid, "#")
            except:
                # best effort: process may already be gone / no match
                pass
            self.host_dut.send_expect("", "# ")
        elif self.control_type == "telnet":
            scan_cmd = "lsof -i:%d | grep telnet | awk '{print $2}'" % self.serial_port
            proc_info = self.host_dut.send_expect(scan_cmd, "#")
            try:
                pid = int(proc_info)
                self.host_dut.send_expect("kill %d" % pid, "#")
            except:
                # best effort: no telnet process bound to the port
                pass
        elif self.control_type == "qga":
            scan_cmd = (
                "ps -e -o pid,cmd |grep 'address=%s' |grep -v grep"
                % self.qga_socket_path
            )
            out = self.host_dut.send_expect(scan_cmd, "#")
            proc_info = out.strip().split()
            try:
                pid = int(proc_info[0])
                self.host_dut.send_expect("kill %d" % pid, "#")
            except:
                # best effort: qemu-ga client may already be gone
                pass

        self.control_session = None
        return
+
    @handle_control_session
    def control_command(self, command):
        """
        Use the serial port to control VM.
        Note:
        :command: there are these commands as below:
                  ping, network, powerdown
        :args: give different args by the different commands.

        Returns "Success"/"Failed" for the known commands, or the raw
        session output for anything else.
        """

        if command == "ping":
            if self.control_type == "qga":
                return "Success"
            else:
                # disable stty input characters for send_expect function
                self.control_session.send_expect(
                    "stty -echo", "#", timeout=self.OPERATION_TIMEOUT
                )
                return "Success"
        elif command == "network":
            if self.control_type == "qga":
                # wait few seconds for network ready
                time.sleep(5)
                out = self.control_session.send_expect(
                    self.qga_cmd_head + "ifconfig", "#", timeout=self.OPERATION_TIMEOUT
                )
            else:
                pci = "00:1f.0"
                if not getattr(self, "net_nic_pci", None) is None:
                    pci = self.net_nic_pci
                # If interface is virtio model, net file will be under virtio* directory
                if self.nic_model == "virtio":
                    pci += "/virtio*/"

                intf = self.control_session.send_expect(
                    "ls -1 /sys/bus/pci/devices/0000:%s/net" % pci,
                    "#",
                    timeout=self.OPERATION_TIMEOUT,
                )
                out = self.control_session.send_expect(
                    "ifconfig %s" % intf, "#", timeout=self.OPERATION_TIMEOUT
                )
                # 10.0.2.* is qemu's default user-net range; missing means
                # the interface has no address yet, so try dhcp
                if "10.0.2" not in out:
                    self.control_session.send_expect(
                        "dhclient %s -timeout 10" % intf, "#", timeout=30
                    )
                else:
                    return "Success"

                out = self.control_session.send_expect(
                    "ifconfig", "#", timeout=self.OPERATION_TIMEOUT
                )

            if "10.0.2" not in out:
                return "Failed"
            else:
                return "Success"
        elif command == "powerdown":
            if self.control_type == "qga":
                self.control_session.send_expect(
                    self.qga_cmd_head + "powerdown", "#", timeout=self.OPERATION_TIMEOUT
                )
            else:
                self.control_session.send_command("init 0")

            if self.control_type == "socket":
                self.control_session.send_expect("^C", "# ")
            elif self.control_type == "telnet":
                self.control_session.send_command("^]")
                self.control_session.send_command("quit")

            # give the guest time to shut down before forcing qemu away
            time.sleep(10)
            self.kill_alive()
            return "Success"
        else:
            if self.control_type == "qga":
                self.host_logger.warning("QGA not support [%s] command" % command)
                out = "Failed"
            else:
                out = self.control_session.send_command(command)
            return out
+
+ def _stop_vm(self):
+ """
+ Stop VM.
+ """
+ if self.vm_status is ST_RUNNING:
+ self.control_command("powerdown")
+ else:
+ self.__monitor_session("quit")
+ time.sleep(5)
+ # remove temporary file
+ self.host_session.send_expect("rm -f %s" % self.__pid_file, "#")
+
+ def pin_threads(self, lcores):
+ """
+ Pin thread to assigned cores
+ """
+ thread_reg = r"CPU #(\d+): .* thread_id=(\d+)"
+ output = self.__monitor_session("info", "cpus")
+ thread_cores = re.findall(thread_reg, output)
+ cores_map = list(zip(thread_cores, lcores))
+ for thread_info, core_id in cores_map:
+ cpu_id, thread_id = thread_info
+ self.host_session.send_expect(
+ "taskset -pc %d %s" % (core_id, thread_id), "#"
+ )
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 05/23] dts: merge DTS framework/qemu_libvirt.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (3 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 04/23] dts: merge DTS framework/qemu_kvm.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 06/23] dts: merge DTS framework/test_capabilities.py " Juraj Linkeš
` (17 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/qemu_libvirt.py | 884 ++++++++++++++++++++++++++++++++++
1 file changed, 884 insertions(+)
create mode 100644 dts/framework/qemu_libvirt.py
diff --git a/dts/framework/qemu_libvirt.py b/dts/framework/qemu_libvirt.py
new file mode 100644
index 0000000000..740b7bbc55
--- /dev/null
+++ b/dts/framework/qemu_libvirt.py
@@ -0,0 +1,884 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import time
+import xml.etree.ElementTree as ET
+from xml.dom import minidom
+from xml.etree.ElementTree import ElementTree
+
+import framework.utils as utils
+
+from .config import VIRTCONF, VirtConf
+from .dut import Dut
+from .exception import StartVMFailedException
+from .logger import getLogger
+from .ssh_connection import SSHConnection
+from .virt_base import VirtBase
+from .virt_resource import VirtResource
+
+
class LibvirtKvm(VirtBase):
    """
    Manage a KVM virtual machine on the host DUT through libvirt.

    The domain is described by an ElementTree XML document built up by
    the add_vm_* methods and instantiated with "virsh create".
    """

    # default host bridge used for tap interfaces
    DEFAULT_BRIDGE = "br0"
    # qemu ifup helper script template; %(switch)s is the bridge name
    QEMU_IFUP = (
        "#!/bin/sh\n\n"
        + "set -x\n\n"
        + "switch=%(switch)s\n\n"
        + "if [ -n '$1' ];then\n"
        + "    tunctl -t $1\n"
        + "    ip link set $1 up\n"
        + "    sleep 0.5s\n"
        + "    brctl addif $switch $1\n"
        + "    exit 0\n"
        + "else\n"
        + "    echo 'Error: no interface specified'\n"
        + "    exit 1\n"
        + "fi"
    )
    # install location of the generated ifup script on the host
    QEMU_IFUP_PATH = "/etc/qemu-ifup"
+
    def __init__(self, dut, name, suite):
        """
        Build the libvirt domain XML skeleton for a new KVM guest.

        :param dut: host DUT object the VM runs on
        :param name: libvirt domain (VM) name
        :param suite: test suite name owning this VM
        """
        # initialize virtualization base module
        super(LibvirtKvm, self).__init__(dut, name, suite)

        # initialize qemu emulator, example: qemu-system-x86_64
        self.qemu_emulator = self.get_qemu_emulator()

        self.logger = dut.logger
        # disk and pci device default index
        self.diskindex = "a"  # next free /dev/vdX letter
        self.controllerindex = 0
        self.pciindex = 10  # next guest PCI slot handed out to devices

        # configure root element
        self.root = ElementTree()
        self.domain = ET.Element("domain")
        # replace root element
        self.root._setroot(self.domain)
        # add xml header
        self.domain.set("type", "kvm")
        self.domain.set("xmlns:qemu", "http://libvirt.org/schemas/domain/qemu/1.0")
        ET.SubElement(self.domain, "name").text = name

        # devices pass-through into vm
        self.pci_maps = []

        # default login user,password
        self.username = self.host_dut.crb["user"]
        self.password = self.host_dut.crb["pass"]

        # internal variable to track whether default nic has been added
        self.__default_nic = False
        self.__default_nic_pci = ""

        # set some default values for vm,
        # if there is not the values of the specified options
        self.set_vm_default()
+
+ def get_qemu_emulator(self):
+ """
+ Get the qemu emulator based on the crb.
+ """
+ arch = self.host_session.send_expect("uname -m", "# ")
+ return "/usr/bin/qemu-system-" + arch
+
    def get_virt_type(self):
        """Return the virtualization framework identifier for this class."""
        return "LIBVIRT"
+
+ def has_virtual_ability(self):
+ """
+ check and setup host virtual ability
+ """
+ arch = self.host_session.send_expect("uname -m", "# ")
+ if arch == "aarch64":
+ out = self.host_session.send_expect("service libvirtd status", "# ")
+ if "active (running)" not in out:
+ return False
+ return True
+
+ out = self.host_session.send_expect("cat /proc/cpuinfo | grep flags", "# ")
+ rgx = re.search(" vmx ", out)
+ if rgx:
+ pass
+ else:
+ self.host_logger.warning("Hardware virtualization " "disabled on host!!!")
+ return False
+
+ out = self.host_session.send_expect("lsmod | grep kvm", "# ")
+ if "kvm" not in out or "kvm_intel" not in out:
+ return False
+
+ out = self.host_session.send_expect("service libvirtd status", "# ")
+ if "active (running)" not in out:
+ return False
+
+ return True
+
+ def load_virtual_mod(self):
+ self.host_session.send_expect("modprobe kvm", "# ")
+ self.host_session.send_expect("modprobe kvm_intel", "# ")
+
+ def unload_virtual_mod(self):
+ self.host_session.send_expect("rmmod kvm_intel", "# ")
+ self.host_session.send_expect("rmmod kvm", "# ")
+
    def disk_image_is_ok(self, image):
        """
        Check if the image is OK and no error.

        Currently a stub: no validation is performed and None is returned.
        """
        pass
+
+ def add_vm_mem(self, **options):
+ """
+ Options:
+ size : memory size, measured in MB
+ hugepage : guest memory allocated using hugepages
+ """
+ if "size" in list(options.keys()):
+ memory = ET.SubElement(self.domain, "memory", {"unit": "MB"})
+ memory.text = options["size"]
+ if "hugepage" in list(options.keys()):
+ memoryBacking = ET.SubElement(self.domain, "memoryBacking")
+ ET.SubElement(memoryBacking, "hugepages")
+
+ def set_vm_cpu(self, **options):
+ """
+ Set VM cpu.
+ """
+ index = self.find_option_index("cpu")
+ if index:
+ self.params[index] = {"cpu": [options]}
+ else:
+ self.params.append({"cpu": [options]})
+
+ def add_vm_cpu(self, **options):
+ """
+ 'number' : '4' #number of vcpus
+ 'cpupin' : '3 4 5 6' # host cpu list
+ """
+ vcpu = 0
+ if "number" in list(options.keys()):
+ vmcpu = ET.SubElement(self.domain, "vcpu", {"placement": "static"})
+ vmcpu.text = options["number"]
+ if "cpupin" in list(options.keys()):
+ cputune = ET.SubElement(self.domain, "cputune")
+ # cpu resource will be allocated
+ req_cpus = options["cpupin"].split()
+ cpus = self.virt_pool.alloc_cpu(vm=self.vm_name, corelist=req_cpus)
+ for cpu in cpus:
+ ET.SubElement(cputune, "vcpupin", {"vcpu": "%d" % vcpu, "cpuset": cpu})
+ vcpu += 1
+ else: # request cpu from vm resource pool
+ cpus = self.virt_pool.alloc_cpu(self.vm_name, number=int(options["number"]))
+ for cpu in cpus:
+ ET.SubElement(cputune, "vcpupin", {"vcpu": "%d" % vcpu, "cpuset": cpu})
+ vcpu += 1
+
+ def get_vm_cpu(self):
+ cpus = self.virt_pool.get_cpu_on_vm(self.vm_name)
+ return cpus
+
    def add_vm_qga(self, options):
        """
        Add a qemu guest agent channel (unix chardev + virtio-serial port).

        Records the socket path in self.qga_sock_path; *options* is
        currently unused.
        """
        qemu = ET.SubElement(self.domain, "qemu:commandline")
        ET.SubElement(qemu, "qemu:arg", {"value": "-chardev"})
        ET.SubElement(
            qemu,
            "qemu:arg",
            {
                "value": "socket,path=/tmp/"
                + "%s_qga0.sock," % self.vm_name
                + "server,nowait,id=%s_qga0" % self.vm_name
            },
        )
        ET.SubElement(qemu, "qemu:arg", {"value": "-device"})
        ET.SubElement(qemu, "qemu:arg", {"value": "virtio-serial"})
        ET.SubElement(qemu, "qemu:arg", {"value": "-device"})
        ET.SubElement(
            qemu,
            "qemu:arg",
            {
                "value": "virtserialport,"
                + "chardev=%s_qga0" % self.vm_name
                + ",name=org.qemu.guest_agent.0"
            },
        )
        # remember the socket so __control_session can reach the agent
        self.qga_sock_path = "/tmp/%s_qga0.sock" % self.vm_name
+
+ def add_vm_os(self, **options):
+ os = self.domain.find("os")
+ if "loader" in list(options.keys()):
+ loader = ET.SubElement(os, "loader", {"readonly": "yes", "type": "pflash"})
+ loader.text = options["loader"]
+ if "nvram" in list(options.keys()):
+ nvram = ET.SubElement(os, "nvram")
+ nvram.text = options["nvram"]
+
+ def set_vm_default_aarch64(self):
+ os = ET.SubElement(self.domain, "os")
+ type = ET.SubElement(os, "type", {"arch": "aarch64", "machine": "virt"})
+ type.text = "hvm"
+ ET.SubElement(os, "boot", {"dev": "hd"})
+ features = ET.SubElement(self.domain, "features")
+ ET.SubElement(features, "acpi")
+
+ ET.SubElement(self.domain, "cpu", {"mode": "host-passthrough", "check": "none"})
+
+ def set_vm_default_x86_64(self):
+ os = ET.SubElement(self.domain, "os")
+ type = ET.SubElement(os, "type", {"arch": "x86_64", "machine": "pc-i440fx-1.6"})
+ type.text = "hvm"
+ ET.SubElement(os, "boot", {"dev": "hd"})
+ features = ET.SubElement(self.domain, "features")
+ ET.SubElement(features, "acpi")
+ ET.SubElement(features, "apic")
+ ET.SubElement(features, "pae")
+
+ ET.SubElement(self.domain, "cpu", {"mode": "host-passthrough"})
+ self.__default_nic_pci = "00:1f.0"
+
    def set_vm_default(self):
        """
        Apply baseline domain configuration for the host architecture.

        Dispatches to set_vm_default_<arch>, registers the emulator and
        QGA channel, then adds the default control NIC exactly once.
        """
        arch = self.host_session.send_expect("uname -m", "# ")
        # pick the arch-specific defaults, e.g. set_vm_default_x86_64
        set_default_func = getattr(self, "set_vm_default_" + arch)
        if callable(set_default_func):
            set_default_func()

        # qemu-kvm for emulator
        device = ET.SubElement(self.domain, "devices")
        ET.SubElement(device, "emulator").text = self.qemu_emulator

        # qemu guest agent
        self.add_vm_qga(None)

        # add default control interface
        if not self.__default_nic:
            if len(self.__default_nic_pci) > 0:
                def_nic = {
                    "type": "nic",
                    "opt_hostfwd": "",
                    "opt_addr": self.__default_nic_pci,
                }
            else:
                def_nic = {"type": "nic", "opt_hostfwd": ""}
            self.add_vm_net(**def_nic)
            self.__default_nic = True
+
    def set_qemu_emulator(self, qemu_emulator_path):
        """
        Set the qemu emulator in the specified path explicitly.

        Returns None (leaving the emulator unchanged) when the path does
        not exist or is not executable on the DUT.
        """
        out = self.host_session.send_expect("ls %s" % qemu_emulator_path, "# ")
        if "No such file or directory" in out:
            self.host_logger.error(
                "No emulator [ %s ] on the DUT" % (qemu_emulator_path)
            )
            return None
        # check the execute permission bit via the shell test builtin
        out = self.host_session.send_expect(
            "[ -x %s ];echo $?" % (qemu_emulator_path), "# "
        )
        if out != "0":
            self.host_logger.error(
                "Emulator [ %s ] " % qemu_emulator_path + "not executable on the DUT"
            )
            return None
        self.qemu_emulator = qemu_emulator_path
+
    def add_vm_qemu(self, **options):
        """
        Options:
            path: absolute path for qemu emulator
        """
        if "path" in list(options.keys()):
            self.set_qemu_emulator(options["path"])
        # update emulator config
        # NOTE(review): this appends a second <emulator> element next to the
        # one created in set_vm_default -- confirm libvirt keeps the intended one.
        devices = self.domain.find("devices")
        ET.SubElement(devices, "emulator").text = self.qemu_emulator
+
+ def add_vm_disk(self, **options):
+ """
+ Options:
+ file: absolute path of disk image file
+ type: image file formats
+ """
+ devices = self.domain.find("devices")
+ disk = ET.SubElement(devices, "disk", {"type": "file", "device": "disk"})
+
+ if "file" not in options:
+ return False
+
+ ET.SubElement(disk, "source", {"file": options["file"]})
+ if "opt_format" not in options:
+ disk_type = "raw"
+ else:
+ disk_type = options["opt_format"]
+
+ ET.SubElement(disk, "driver", {"name": "qemu", "type": disk_type})
+
+ if "opt_bus" not in options:
+ bus = "virtio"
+ else:
+ bus = options["opt_bus"]
+ if "opt_dev" not in options:
+ dev = "vd%c" % self.diskindex
+ self.diskindex = chr(ord(self.diskindex) + 1)
+ else:
+ dev = options["opt_dev"]
+ ET.SubElement(disk, "target", {"dev": dev, "bus": bus})
+
+ if "opt_controller" in options:
+ controller = ET.SubElement(
+ devices,
+ "controller",
+ {
+ "type": bus,
+ "index": hex(self.controllerindex)[2:],
+ "model": options["opt_controller"],
+ },
+ )
+ self.controllerindex += 1
+ ET.SubElement(
+ controller,
+ "address",
+ {
+ "type": "pci",
+ "domain": "0x0000",
+ "bus": hex(self.pciindex),
+ "slot": "0x00",
+ "function": "0x00",
+ },
+ )
+ self.pciindex += 1
+
    def add_vm_daemon(self, **options):
        """No-op: libvirt daemonizes the guest itself, nothing to configure."""
        pass
+
+ def add_vm_vnc(self, **options):
+ """
+ Add VM display option
+ """
+ disable = options.get("disable")
+ if disable and disable == "True":
+ return
+ else:
+ displayNum = options.get("displayNum")
+ port = (
+ displayNum
+ if displayNum
+ else self.virt_pool.alloc_port(self.vm_name, port_type="display")
+ )
+ ip = self.host_dut.get_ip_address()
+ # set main block
+ graphics = {
+ "type": "vnc",
+ "port": port,
+ "autoport": "yes",
+ "listen": ip,
+ "keymap": "en-us",
+ }
+
+ devices = self.domain.find("devices")
+ graphics = ET.SubElement(devices, "graphics", graphics)
+ # set sub block
+ listen = {
+ "type": "address",
+ "address": ip,
+ }
+ ET.SubElement(graphics, "listen", listen)
+
+ def add_vm_serial_port(self, **options):
+ if "enable" in list(options.keys()):
+ if options["enable"].lower() == "yes":
+ devices = self.domain.find("devices")
+ if "opt_type" in list(options.keys()):
+ serial_type = options["opt_type"]
+ else:
+ serial_type = "unix"
+ if serial_type == "pty":
+ serial = ET.SubElement(devices, "serial", {"type": serial_type})
+ ET.SubElement(serial, "target", {"port": "0"})
+ elif serial_type == "unix":
+ serial = ET.SubElement(devices, "serial", {"type": serial_type})
+ self.serial_path = "/tmp/%s_serial.sock" % self.vm_name
+ ET.SubElement(
+ serial, "source", {"mode": "bind", "path": self.serial_path}
+ )
+ ET.SubElement(serial, "target", {"port": "0"})
+ else:
+ msg = "Serial type %s is not supported!" % serial_type
+ self.logger.error(msg)
+ return False
+ console = ET.SubElement(devices, "console", {"type": serial_type})
+ ET.SubElement(console, "target", {"type": "serial", "port": "0"})
+
+ def add_vm_login(self, **options):
+ """
+ options:
+ user: login username of virtual machine
+ password: login password of virtual machine
+ """
+ if "user" in list(options.keys()):
+ user = options["user"]
+ self.username = user
+
+ if "password" in list(options.keys()):
+ password = options["password"]
+ self.password = password
+
+ def get_vm_login(self):
+ return (self.username, self.password)
+
+ def __parse_pci(self, pci_address):
+ pci_regex = r"([0-9a-fA-F]{1,2}):([0-9a-fA-F]{1,2})" + ".([0-9a-fA-F]{1,2})"
+ pci_regex_domain = (
+ r"([0-9a-fA-F]{1,4}):([0-9a-fA-F]{1,2}):"
+ + "([0-9a-fA-F]{1,2}).([0-9a-fA-F]{1,2})"
+ )
+ m = re.match(pci_regex, pci_address)
+ if m is not None:
+ bus = m.group(1)
+ slot = m.group(2)
+ func = m.group(3)
+ dom = "0"
+ return (bus, slot, func, dom)
+ m = re.match(pci_regex_domain, pci_address)
+ if m is not None:
+ bus = m.group(2)
+ slot = m.group(3)
+ func = m.group(4)
+ dom = m.group(1)
+ return (bus, slot, func, dom)
+ return None
+
+ def set_vm_device(self, driver="pci-assign", **opts):
+ opts["driver"] = driver
+ self.add_vm_device(**opts)
+
    def __generate_net_config_script(self, switch=DEFAULT_BRIDGE):
        """
        Generate a script for qemu emulator to build a tap device
        between host and guest.

        Renders QEMU_IFUP for *switch*, copies it to the host and installs
        it executable at QEMU_IFUP_PATH.
        """
        qemu_ifup = self.QEMU_IFUP % {"switch": switch}
        file_name = os.path.basename(self.QEMU_IFUP_PATH)
        tmp_file_path = "/tmp/%s" % file_name
        self.host_dut.create_file(qemu_ifup, tmp_file_path)
        # create_file apparently lands the file in ~ on the host (hence the
        # mv from ~); move it into its final location and make it executable
        self.host_session.send_expect(
            "mv -f ~/%s %s" % (file_name, self.QEMU_IFUP_PATH), "# "
        )
        self.host_session.send_expect("chmod +x %s" % self.QEMU_IFUP_PATH, "# ")
+
+ def __parse_opt_setting(self, opt_settings):
+ if "=" not in opt_settings:
+ msg = "wrong opt_settings setting"
+ raise Exception(msg)
+ setting = [item.split("=") for item in opt_settings.split(",")]
+ return dict(setting)
+
+ def __get_pci_addr_config(self, pci):
+ pci = self.__parse_pci(pci)
+ if pci is None:
+ msg = "Invalid guestpci for host device pass-through !!!"
+ self.logger.error(msg)
+ return False
+ bus, slot, func, dom = pci
+ config = {
+ "type": "pci",
+ "domain": "0x%s" % dom,
+ "bus": "0x%s" % bus,
+ "slot": "0x%s" % slot,
+ "function": "0x%s" % func,
+ }
+ return config
+
+ def __write_config(self, parent, configs):
+ for config in configs:
+ node_name = config[0]
+ opt = config[1]
+ node = ET.SubElement(parent, node_name, opt)
+ if len(config) == 3:
+ self.__write_config(node, config[2])
+
+ def __set_vm_bridge_interface(self, **options):
+ mac = options.get("opt_mac")
+ opt_br = options.get("opt_br")
+ if not mac or not opt_br:
+ msg = "Missing some bridge device option !!!"
+ self.logger.error(msg)
+ return False
+ _config = [
+ ["mac", {"address": mac}],
+ [
+ "source",
+ {
+ "bridge": opt_br,
+ },
+ ],
+ [
+ "model",
+ {
+ "type": "virtio",
+ },
+ ],
+ ]
+ config = [["interface", {"type": "bridge"}, _config]]
+ # set xml file
+ parent = self.domain.find("devices")
+ self.__write_config(parent, config)
+
+ def __add_vm_virtio_user_pci(self, **options):
+ mac = options.get("opt_mac")
+ mode = options.get("opt_server") or "client"
+ # unix socket path of character device
+ sock_path = options.get("opt_path")
+ queue = options.get("opt_queue")
+ settings = options.get("opt_settings")
+ # pci address in virtual machine
+ pci = options.get("opt_host")
+ if not mac or not sock_path:
+ msg = "Missing some vhostuser device option !!!"
+ self.logger.error(msg)
+ return False
+ node_name = "interface"
+ # basic options
+ _config = [
+ ["mac", {"address": mac}],
+ [
+ "source",
+ {
+ "type": "unix",
+ "path": sock_path,
+ "mode": mode,
+ },
+ ],
+ [
+ "model",
+ {
+ "type": "virtio",
+ },
+ ],
+ ]
+ # append pci address
+ if pci:
+ _config.append(["address", self.__get_pci_addr_config(pci)])
+ if queue or settings:
+ drv_config = {"name": "vhost"}
+ if settings:
+ _sub_opt = self.__parse_opt_setting(settings)
+ drv_opt = {}
+ guest_opt = {}
+ host_opt = {}
+ for key, value in _sub_opt.items():
+ if key.startswith("host_"):
+ host_opt[key[5:]] = value
+ continue
+ if key.startswith("guest_"):
+ guest_opt[key[6:]] = value
+ continue
+ drv_opt[key] = value
+ drv_config.update(drv_opt)
+ sub_drv_config = []
+ if host_opt:
+ sub_drv_config.append(["host", host_opt])
+ if guest_opt:
+ sub_drv_config.append(["guest", guest_opt])
+ # The optional queues attribute controls the number of queues to be
+ # used for either Multiqueue virtio-net or vhost-user network
+ # interfaces. Each queue will potentially be handled by a different
+ # processor, resulting in much higher throughput. virtio-net since
+ # 1.0.6 (QEMU and KVM only) vhost-user since 1.2.17(QEMU and KVM
+ # only).
+ if queue:
+ drv_config.update(
+ {
+ "queues": queue,
+ }
+ )
+ # set driver config
+ if sub_drv_config:
+ _config.append(["driver", drv_config, sub_drv_config])
+ else:
+ _config.append(["driver", drv_config])
+ config = [[node_name, {"type": "vhostuser"}, _config]]
+ # set xml file
+ parent = self.domain.find("devices")
+ self.__write_config(parent, config)
+
+ def __add_vm_pci_assign(self, **options):
+ devices = self.domain.find("devices")
+ # add hostdev config block
+ config = {"mode": "subsystem", "type": "pci", "managed": "yes"}
+ hostdevice = ET.SubElement(devices, "hostdev", config)
+ # add hostdev/source config block
+ pci_addr = options.get("opt_host")
+ if not pci_addr:
+ msg = "Missing opt_host for device option!!!"
+ self.logger.error(msg)
+ return False
+ pci = self.__parse_pci(pci_addr)
+ if pci is None:
+ return False
+ bus, slot, func, dom = pci
+ source = ET.SubElement(hostdevice, "source")
+ config = {
+ "domain": "0x%s" % dom,
+ "bus": "0x%s" % bus,
+ "slot": "0x%s" % slot,
+ "function": "0x%s" % func,
+ }
+ ET.SubElement(source, "address", config)
+ # add hostdev/source/address config block
+ guest_pci_addr = options.get("guestpci")
+ if not guest_pci_addr:
+ guest_pci_addr = "0000:%s:00.0" % hex(self.pciindex)[2:]
+ self.pciindex += 1
+ config = self.__get_pci_addr_config(guest_pci_addr)
+ ET.SubElement(hostdevice, "address", config)
+ # save host and guest pci address mapping
+ pci_map = {}
+ pci_map["hostpci"] = pci_addr
+ pci_map["guestpci"] = guest_pci_addr
+ self.pci_maps.append(pci_map)
+
+ def add_vm_device(self, **options):
+ """
+ options:
+ pf_idx: device index of pass-through device
+ guestpci: assigned pci address in vm
+ """
+ driver_table = {
+ "vhost-user": self.__add_vm_virtio_user_pci,
+ "bridge": self.__set_vm_bridge_interface,
+ "pci-assign": self.__add_vm_pci_assign,
+ }
+ driver = options.get("driver")
+ if not driver or driver not in list(driver_table.keys()):
+ driver = "pci-assign"
+ msg = "use {0} configuration as default driver".format(driver)
+ self.logger.warning(msg)
+ func = driver_table.get(driver)
+ func(**options)
+
+ def add_vm_net(self, **options):
+ """
+ Options:
+ default: create e1000 netdev and redirect ssh port
+ """
+ if "type" in list(options.keys()):
+ if options["type"] == "nic":
+ self.__add_vm_net_nic(**options)
+ elif options["type"] == "tap":
+ self.__add_vm_net_tap(**options)
+
+ def __add_vm_net_nic(self, **options):
+ """
+ type: nic
+ opt_model: ["e1000" | "virtio" | "i82551" | ...]
+ Default is e1000.
+ opt_addr: ''
+ note: PCI cards only.
+ """
+ if "opt_model" in list(options.keys()):
+ model = options["opt_model"]
+ else:
+ model = "e1000"
+
+ if "opt_hostfwd" in list(options.keys()):
+ port = self.virt_pool.alloc_port(self.vm_name)
+ if port is None:
+ return
+ dut_ip = self.host_dut.crb["IP"]
+ self.vm_ip = "%s:%d" % (dut_ip, port)
+
+ qemu = ET.SubElement(self.domain, "qemu:commandline")
+ ET.SubElement(qemu, "qemu:arg", {"value": "-net"})
+ if "opt_addr" in list(options.keys()):
+ pci = self.__parse_pci(options["opt_addr"])
+ if pci is None:
+ return False
+ bus, slot, func, dom = pci
+ ET.SubElement(
+ qemu, "qemu:arg", {"value": "nic,model=e1000,addr=0x%s" % slot}
+ )
+ else:
+ ET.SubElement(
+ qemu, "qemu:arg", {"value": "nic,model=e1000,addr=0x%x" % self.pciindex}
+ )
+ self.pciindex += 1
+
+ if "opt_hostfwd" in list(options.keys()):
+ ET.SubElement(qemu, "qemu:arg", {"value": "-net"})
+ ET.SubElement(
+ qemu,
+ "qemu:arg",
+ {"value": "user,hostfwd=" "tcp:%s:%d-:22" % (dut_ip, port)},
+ )
+
+ def __add_vm_net_tap(self, **options):
+ """
+ type: tap
+ opt_br: br0
+ note: if choosing tap, need to specify bridge name,
+ else it will be br0.
+ opt_script: QEMU_IFUP_PATH
+ note: if not specified, default is self.QEMU_IFUP_PATH.
+ """
+ _config = [["target", {"dev": "tap0"}]]
+ # add bridge info
+ opt_br = options.get("opt_br")
+ bridge = opt_br if opt_br else self.DEFAULT_BRIDGE
+ _config.append(["source", {"bridge": bridge}])
+ self.__generate_net_config_script(str(bridge))
+ # add network configure script path
+ opt_script = options.get("opt_script")
+ script_path = opt_script if opt_script else self.QEMU_IFUP_PATH
+ _config.append(["script", {"path": script_path}])
+ config = [["interface", {"type": "bridge"}, _config]]
+ # set xml file
+ parent = self.domain.find("devices")
+ self.__write_config(parent, config)
+
+ def add_vm_virtio_serial_channel(self, **options):
+ """
+ Options:
+ path: virtio unix socket absolute path
+ name: virtio serial name in vm
+ """
+ devices = self.domain.find("devices")
+ channel = ET.SubElement(devices, "channel", {"type": "unix"})
+ for opt in ["path", "name"]:
+ if opt not in list(options.keys()):
+ msg = "invalid virtio serial channel setting"
+ self.logger.error(msg)
+ return
+
+ ET.SubElement(channel, "source", {"mode": "bind", "path": options["path"]})
+ ET.SubElement(channel, "target", {"type": "virtio", "name": options["name"]})
+ ET.SubElement(
+ channel,
+ "address",
+ {
+ "type": "virtio-serial",
+ "controller": "0",
+ "bus": "0",
+ "port": "%d" % self.pciindex,
+ },
+ )
+ self.pciindex += 1
+
+ def get_vm_ip(self):
+ return self.vm_ip
+
    def get_pci_mappings(self):
        """
        Return guest and host pci devices mapping structure

        Each entry is a dict with "hostpci" and "guestpci" keys.
        """
        return self.pci_maps
+
+ def __control_session(self, command, *args):
+ """
+ Use the qemu guest agent service to control VM.
+ Note:
+ :command: there are these commands as below:
+ cat, fsfreeze, fstrim, halt, ifconfig, info,\
+ ping, powerdown, reboot, shutdown, suspend
+ :args: give different args by the different commands.
+ """
+ if not self.qga_sock_path:
+ self.host_logger.info(
+ "No QGA service between host [ %s ] and guest [ %s ]"
+ % (self.host_dut.Name, self.vm_name)
+ )
+ return None
+
+ cmd_head = (
+ "~/QMP/"
+ + "qemu-ga-client "
+ + "--address=%s %s" % (self.qga_sock_path, command)
+ )
+
+ cmd = cmd_head
+ for arg in args:
+ cmd = cmd_head + " " + str(arg)
+
+ if command is "ping":
+ out = self.host_session.send_expect(cmd, "# ", int(args[0]))
+ else:
+ out = self.host_session.send_expect(cmd, "# ")
+
+ return out
+
+ def _start_vm(self):
+ xml_file = "/tmp/%s.xml" % self.vm_name
+ if os.path.exists(xml_file):
+ os.remove(xml_file)
+ self.root.write(xml_file)
+ with open(xml_file, "r") as fp:
+ content = fp.read()
+ doc = minidom.parseString(content)
+ vm_content = doc.toprettyxml(indent=" ")
+ with open(xml_file, "w") as fp:
+ fp.write(vm_content)
+ self.host_session.copy_file_to(xml_file)
+ time.sleep(2)
+
+ self.host_session.send_expect("virsh", "virsh #")
+ self.host_session.send_expect("create /root/%s.xml" % self.vm_name, "virsh #")
+ self.host_session.send_expect("quit", "# ")
+ out = self.__control_session("ping", "120")
+
+ if "Not responded" in out:
+ raise StartVMFailedException("Not response in 120 seconds!!!")
+
+ self.__wait_vmnet_ready()
+
+ def __wait_vmnet_ready(self):
+ """
+ wait for 120 seconds for vm net ready
+ 10.0.2.* is the default ip address allocated by qemu
+ """
+ count = 20
+ while count:
+ out = self.__control_session("ifconfig")
+ if "10.0.2" in out:
+ pos = self.vm_ip.find(":")
+ ssh_key = "[" + self.vm_ip[:pos] + "]" + self.vm_ip[pos:]
+ os.system("ssh-keygen -R %s" % ssh_key)
+ return True
+ time.sleep(6)
+ count -= 1
+
+ raise StartVMFailedException(
+ "Virtual machine control net not ready " + "in 120 seconds!!!"
+ )
+
    def stop(self):
        """Shut the guest down via the guest agent and allow time to power off."""
        self.__control_session("shutdown")
        time.sleep(5)
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 06/23] dts: merge DTS framework/test_capabilities.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (4 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 05/23] dts: merge DTS framework/qemu_libvirt.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 07/23] dts: merge DTS framework/test_case.py " Juraj Linkeš
` (16 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/test_capabilities.py | 5 +++++
1 file changed, 5 insertions(+)
create mode 100644 dts/framework/test_capabilities.py
diff --git a/dts/framework/test_capabilities.py b/dts/framework/test_capabilities.py
new file mode 100644
index 0000000000..5442f89e1d
--- /dev/null
+++ b/dts/framework/test_capabilities.py
@@ -0,0 +1,5 @@
# this structure will be used to determine which parts of tests should be skipped
"""
Dict used to skip parts of tests if NIC is known not to support them
"""
# Maps a capability name to the list of kernel drivers whose NICs are
# known to lack that capability; e.g. thunder-nicvf and qede NICs do
# not support SCTP TX checksum offload.
DRIVER_TEST_LACK_CAPA = {"sctp_tx_offload": ["thunder-nicvf", "qede"]}
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 07/23] dts: merge DTS framework/test_case.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (5 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 06/23] dts: merge DTS framework/test_capabilities.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 08/23] dts: merge DTS framework/virt_base.py " Juraj Linkeš
` (15 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/test_case.py | 625 +++++++++++++++++++++++++++++++++++++
1 file changed, 625 insertions(+)
create mode 100644 dts/framework/test_case.py
diff --git a/dts/framework/test_case.py b/dts/framework/test_case.py
new file mode 100644
index 0000000000..1f5d383bae
--- /dev/null
+++ b/dts/framework/test_case.py
@@ -0,0 +1,625 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+A base class for creating DTF test cases.
+"""
+import re
+import signal
+import time
+import traceback
+from functools import wraps
+
+import framework.debugger as debugger
+
+from .config import SuiteConf
+from .exception import TimeoutException, VerifyFailure, VerifySkip
+from .logger import getLogger
+from .rst import RstReport
+from .settings import (
+ DEBUG_CASE_SETTING,
+ DEBUG_SETTING,
+ DRIVERS,
+ FUNC_SETTING,
+ HOST_DRIVER_SETTING,
+ NICS,
+ PERF_SETTING,
+ SUITE_SECTION_NAME,
+ UPDATE_EXPECTED,
+ get_nic_name,
+ load_global_setting,
+)
+from .test_result import Result, ResultTable
+from .utils import BLUE, RED
+
+
+class TestCase(object):
+ def __init__(self, duts, tester, target, suitename):
+ self.suite_name = suitename
+ self.dut = duts[0]
+ self.duts = duts
+ self.tester = tester
+ self.target = target
+
+ # local variable
+ self._requested_tests = None
+ self._subtitle = None
+
+ # check session and reconnect if possible
+ for dutobj in self.duts:
+ self._check_and_reconnect(crb=dutobj)
+ self._check_and_reconnect(crb=self.tester)
+
+ # convert netdevice to codename
+ self.nic = self.dut.nic.name
+ self.nic_obj = self.dut.nic
+ self.kdriver = self.dut.nic.default_driver
+ self.pkg = self.dut.nic.pkg
+
+ # result object for save suite result
+ self._suite_result = Result()
+ self._suite_result.dut = self.dut.crb["IP"]
+ self._suite_result.target = target
+ self._suite_result.nic = self.nic
+ self._suite_result.test_suite = self.suite_name
+ if self._suite_result is None:
+ raise ValueError("Result object should not None")
+
+ # load running environment
+ if load_global_setting(PERF_SETTING) == "yes":
+ self._enable_perf = True
+ else:
+ self._enable_perf = False
+
+ if load_global_setting(FUNC_SETTING) == "yes":
+ self._enable_func = True
+ else:
+ self._enable_func = False
+
+ if load_global_setting(DEBUG_SETTING) == "yes":
+ self._enable_debug = True
+ else:
+ self._enable_debug = False
+
+ if load_global_setting(DEBUG_CASE_SETTING) == "yes":
+ self._debug_case = True
+ else:
+ self._debug_case = False
+
+ self.drivername = load_global_setting(HOST_DRIVER_SETTING)
+
+ # create rst format report for this suite
+ self._rst_obj = RstReport(
+ "rst_report", target, self.nic, self.suite_name, self._enable_perf
+ )
+
+ # load suite configuration
+ self._suite_conf = SuiteConf(self.suite_name)
+ self._suite_cfg = self._suite_conf.suite_cfg
+
+ # command history
+ self.setup_history = list()
+ self.test_history = list()
+
    def init_log(self):
        """Create and configure the per-suite logger, named after the class."""
        # get log handler
        class_name = self.__class__.__name__
        self.logger = getLogger(class_name)
        self.logger.config_suite(class_name)
+
+ def _check_and_reconnect(self, crb=None):
+ try:
+ result = crb.session.check_available()
+ except:
+ result = False
+
+ if result is False:
+ crb.reconnect_session()
+ if "dut" in str(type(crb)):
+ crb.send_expect("cd %s" % crb.base_dir, "#")
+ crb.set_env_variable()
+
+ try:
+ result = crb.alt_session.check_available()
+ except:
+ result = False
+
+ if result is False:
+ crb.reconnect_session(alt_session=True)
+
    def set_up_all(self):
        """Suite-level setup hook; subclasses override as needed."""
        pass

    def set_up(self):
        """Per-case setup hook; subclasses override as needed."""
        pass

    def tear_down(self):
        """Per-case cleanup hook; subclasses override as needed."""
        pass

    def tear_down_all(self):
        """Suite-level cleanup hook; subclasses override as needed."""
        pass
+
+ def verify(self, passed, description):
+ if not passed:
+ if self._enable_debug:
+ print(RED("Error happened, dump command history..."))
+ self.dump_history()
+ print('Error "%s" happened' % RED(description))
+ print(RED("History dump finished."))
+ raise VerifyFailure(description)
+
+ def skip_case(self, passed, description):
+ if not passed:
+ if self._enable_debug:
+ print('skip case: "%s" ' % RED(description))
+ raise VerifySkip(description)
+
+ def _get_nic_driver(self, nic_name):
+ if nic_name in list(DRIVERS.keys()):
+ return DRIVERS[nic_name]
+
+ return "Unknown"
+
    def set_check_inst(self, check=None):
        # Store the capability-check helper consulted in
        # _execute_test_case to decide per-case skip/support.
        self._check_inst = check
+
    def rst_report(self, *args, **kwargs):
        """Forward a report entry to the suite's RST report object."""
        self._rst_obj.report(*args, **kwargs)

    def result_table_create(self, header):
        """Start a new result table with *header* columns, wired to the
        RST report and the suite logger."""
        self._result_table = ResultTable(header)
        self._result_table.set_rst(self._rst_obj)
        self._result_table.set_logger(self.logger)

    def result_table_add(self, row):
        """Append one row to the current result table."""
        self._result_table.add_row(row)

    def result_table_print(self):
        """Emit the current result table to the log/report."""
        self._result_table.table_print()

    def result_table_getrows(self):
        """Return the rows accumulated in the current result table."""
        return self._result_table.results_table_rows
+
    def _get_functional_cases(self):
        """
        Get all functional test cases.
        """
        # Functional cases are test_* methods that are NOT test_perf_*.
        return self._get_test_cases(r"test_(?!perf_)")

    def _get_performance_cases(self):
        """
        Get all performance test cases.
        """
        return self._get_test_cases(r"test_perf_")
+
+ def _has_it_been_requested(self, test_case, test_name_regex):
+ """
+ Check whether test case has been requested for validation.
+ """
+ name_matches = re.match(test_name_regex, test_case.__name__)
+
+ if self._requested_tests is not None:
+ return name_matches and test_case.__name__ in self._requested_tests
+
+ return name_matches
+
+ def set_requested_cases(self, case_list):
+ """
+ Pass down input cases list for check
+ """
+ if self._requested_tests is None:
+ self._requested_tests = case_list
+ elif case_list is not None:
+ self._requested_tests += case_list
+
    def set_subtitle(self, subtitle):
        """
        Pass down subtitle for Rst report
        """
        # Stored on the report object and written out immediately.
        self._rst_obj._subtitle = subtitle
        self._rst_obj.write_subtitle()
+
    def _get_test_cases(self, test_name_regex):
        """
        Return case list which name matched regex.

        Generator yielding the bound methods of this suite whose names
        match *test_name_regex* and (if set) the requested-case filter.
        """
        for test_case_name in dir(self):
            test_case = getattr(self, test_case_name)
            if callable(test_case) and self._has_it_been_requested(
                test_case, test_name_regex
            ):
                yield test_case
+
+ def execute_setup_all(self):
+ """
+ Execute suite setup_all function before cases.
+ """
+ # clear all previous output
+ for dutobj in self.duts:
+ dutobj.get_session_output(timeout=0.1)
+ self.tester.get_session_output(timeout=0.1)
+
+ # save into setup history list
+ self.enable_history(self.setup_history)
+
+ try:
+ self.set_up_all()
+ return True
+ except VerifySkip as v:
+ self.logger.info("set_up_all SKIPPED:\n" + traceback.format_exc())
+ # record all cases N/A
+ if self._enable_func:
+ for case_obj in self._get_functional_cases():
+ self._suite_result.test_case = case_obj.__name__
+ self._suite_result.test_case_skip(str(v))
+ if self._enable_perf:
+ for case_obj in self._get_performance_cases():
+ self._suite_result.test_case = case_obj.__name__
+ self._suite_result.test_case_skip(str(v))
+ except Exception as v:
+ self.logger.error("set_up_all failed:\n" + traceback.format_exc())
+ # record all cases blocked
+ if self._enable_func:
+ for case_obj in self._get_functional_cases():
+ self._suite_result.test_case = case_obj.__name__
+ self._suite_result.test_case_blocked(
+ "set_up_all failed: {}".format(str(v))
+ )
+ if self._enable_perf:
+ for case_obj in self._get_performance_cases():
+ self._suite_result.test_case = case_obj.__name__
+ self._suite_result.test_case_blocked(
+ "set_up_all failed: {}".format(str(v))
+ )
+ return False
+
+ def _execute_test_case(self, case_obj):
+ """
+ Execute specified test case in specified suite. If any exception occurred in
+ validation process, save the result and tear down this case.
+ """
+ case_name = case_obj.__name__
+ self._suite_result.test_case = case_obj.__name__
+
+ self._rst_obj.write_title("Test Case: " + case_name)
+
+ # save into test command history
+ self.test_history = list()
+ self.enable_history(self.test_history)
+
+ # load suite configuration file here for rerun command
+ self._suite_conf = SuiteConf(self.suite_name)
+ self._suite_cfg = self._suite_conf.suite_cfg
+ self._case_cfg = self._suite_conf.load_case_config(case_name)
+
+ case_result = True
+ if self._check_inst is not None:
+ if self._check_inst.case_skip(case_name[len("test_") :]):
+ self.logger.info("Test Case %s Result SKIPPED:" % case_name)
+ self._rst_obj.write_result("N/A")
+ self._suite_result.test_case_skip(self._check_inst.comments)
+ return case_result
+
+ if not self._check_inst.case_support(case_name[len("test_") :]):
+ self.logger.info("Test Case %s Result SKIPPED:" % case_name)
+ self._rst_obj.write_result("N/A")
+ self._suite_result.test_case_skip(self._check_inst.comments)
+ return case_result
+
+ if self._enable_perf:
+ self._rst_obj.write_annex_title("Annex: " + case_name)
+ try:
+ self.logger.info("Test Case %s Begin" % case_name)
+
+ self.running_case = case_name
+ # clean session
+ for dutobj in self.duts:
+ dutobj.get_session_output(timeout=0.1)
+ self.tester.get_session_output(timeout=0.1)
+ # run set_up function for each case
+ self.set_up()
+ # run test case
+ case_obj()
+
+ self._suite_result.test_case_passed()
+
+ self._rst_obj.write_result("PASS")
+ self.logger.info("Test Case %s Result PASSED:" % case_name)
+
+ except VerifyFailure as v:
+ case_result = False
+ self._suite_result.test_case_failed(str(v))
+ self._rst_obj.write_result("FAIL")
+ self.logger.error("Test Case %s Result FAILED: " % (case_name) + str(v))
+ except VerifySkip as v:
+ self._suite_result.test_case_skip(str(v))
+ self._rst_obj.write_result("N/A")
+ self.logger.info("Test Case %s N/A: " % (case_name))
+ except KeyboardInterrupt:
+ self._suite_result.test_case_blocked("Skipped")
+ self.logger.error("Test Case %s SKIPPED: " % (case_name))
+ self.tear_down()
+ raise KeyboardInterrupt("Stop DTS")
+ except TimeoutException as e:
+ case_result = False
+ self._rst_obj.write_result("FAIL")
+ self._suite_result.test_case_failed(str(e))
+ self.logger.error("Test Case %s Result FAILED: " % (case_name) + str(e))
+ self.logger.error("%s" % (e.get_output()))
+ except Exception:
+ case_result = False
+ trace = traceback.format_exc()
+ self._suite_result.test_case_failed(trace)
+ self.logger.error("Test Case %s Result ERROR: " % (case_name) + trace)
+ finally:
+ # update expected
+ if (
+ load_global_setting(UPDATE_EXPECTED) == "yes"
+ and "update_expected" in self.get_suite_cfg()
+ and self.get_suite_cfg()["update_expected"] == True
+ ):
+ self._suite_conf.update_case_config(SUITE_SECTION_NAME)
+ self.execute_tear_down()
+ return case_result
+
+ def execute_test_cases(self):
+ """
+ Execute all test cases in one suite.
+ """
+ # prepare debugger rerun case environment
+ if self._enable_debug or self._debug_case:
+ debugger.AliveSuite = self
+ _suite_full_name = "TestSuite_" + self.suite_name
+ debugger.AliveModule = __import__(
+ "tests." + _suite_full_name, fromlist=[_suite_full_name]
+ )
+
+ if load_global_setting(FUNC_SETTING) == "yes":
+ for case_obj in self._get_functional_cases():
+ for i in range(self.tester.re_run_time + 1):
+ ret = self.execute_test_case(case_obj)
+
+ if ret is False and self.tester.re_run_time:
+ for dutobj in self.duts:
+ dutobj.get_session_output(timeout=0.5 * (i + 1))
+ self.tester.get_session_output(timeout=0.5 * (i + 1))
+ time.sleep(i + 1)
+ self.logger.info(
+ " Test case %s failed and re-run %d time"
+ % (case_obj.__name__, i + 1)
+ )
+ else:
+ break
+
+ if load_global_setting(PERF_SETTING) == "yes":
+ for case_obj in self._get_performance_cases():
+ self.execute_test_case(case_obj)
+
+ def execute_test_case(self, case_obj):
+ """
+ Execute test case or enter into debug mode.
+ """
+ debugger.AliveCase = case_obj.__name__
+
+ if self._debug_case:
+ self.logger.info("Rerun Test Case %s Begin" % debugger.AliveCase)
+ debugger.keyboard_handle(signal.SIGINT, None)
+ else:
+ return self._execute_test_case(case_obj)
+
    def get_result(self):
        """
        Return suite test result
        """
        return self._suite_result

    def get_case_cfg(self):
        """
        Return case based configuration
        """
        # _case_cfg is (re)loaded per case in _execute_test_case.
        return self._case_cfg

    def get_suite_cfg(self):
        """
        Return suite based configuration
        """
        return self._suite_cfg

    def update_suite_cfg(self, suite_cfg):
        """
        Update suite based configuration
        """
        self._suite_cfg = suite_cfg

    def update_suite_cfg_ele(self, key, value):
        """
        update one element of suite configuration
        """
        self._suite_cfg[key] = value
+
    def execute_tear_downall(self):
        """
        execute suite tear_down_all function

        A failure in tear_down_all is logged but not propagated; the
        DUTs and tester are always cleaned up afterwards.
        """
        try:
            self.tear_down_all()
        except Exception:
            self.logger.error("tear_down_all failed:\n" + traceback.format_exc())

        # Kill any processes the suite left running on every CRB.
        for dutobj in self.duts:
            dutobj.kill_all()
        self.tester.kill_all()

        for dutobj in self.duts:
            dutobj.virt_exit()
            # destroy all vfs
            dutobj.destroy_all_sriov_vfs()
+
+ def execute_tear_down(self):
+ """
+ execute suite tear_down function
+ """
+ try:
+ self.tear_down()
+ except Exception:
+ self.logger.error("tear_down failed:\n" + traceback.format_exc())
+ self.logger.warning(
+ "tear down %s failed, might iterfere next case's result!"
+ % self.running_case
+ )
+
    def enable_history(self, history):
        """
        Enable history for all CRB's default session

        Every command sent on the DUTs' and tester's default sessions
        will be recorded into the given *history* list.
        """
        for dutobj in self.duts:
            dutobj.session.set_history(history)

        self.tester.session.set_history(history)
+
    def dump_history(self):
        """
        Dump recorded command history

        Prints the suite-setup commands first, then the commands of the
        currently running case.
        """
        for cmd_history in self.setup_history:
            print("%-20s: %s" % (BLUE(cmd_history["name"]), cmd_history["command"]))
        for cmd_history in self.test_history:
            print("%-20s: %s" % (BLUE(cmd_history["name"]), cmd_history["command"]))
+
    def wirespeed(self, nic, frame_size, num_ports):
        """
        Calculate bit rate. It is depended for NICs

        Resolves the NIC's line rate from its driver/codename and
        returns the theoretical maximum packet rate for *num_ports*
        ports at *frame_size*-byte frames.

        Note: the *nic* parameter is overwritten below with the
        resolved codename; its incoming value is unused.
        """
        bitrate = 1000.0  # 1Gb ('.0' forces to operate as float)
        if self.nic == "any" or self.nic == "cfg":
            # NIC not pinned by configuration: detect from the first DUT port.
            driver = self._get_nic_driver(self.dut.ports_info[0]["type"])
            nic = get_nic_name(self.dut.ports_info[0]["type"])
        else:
            driver = self._get_nic_driver(self.nic)
            nic = self.nic

        # Scale the 1Gb base up to the NIC's line rate. Driver checks and
        # codename checks are interleaved and the first match wins, so
        # do not reorder this chain.
        if driver == "ixgbe":
            bitrate *= 10  # 10 Gb NICs
        elif nic == "avoton2c5":
            bitrate *= 2.5  # 2.5 Gb NICs
        elif nic in ["fortville_spirit", "fortville_spirit_single"]:
            bitrate *= 40
        elif nic == "fortville_eagle":
            bitrate *= 10
        elif nic == "fortpark_TLV":
            bitrate *= 10
        elif driver == "thunder-nicvf":
            bitrate *= 10
        elif nic == "fortville_25g":
            bitrate *= 25
        elif nic == "columbiaville_25g":
            bitrate *= 25
        elif nic == "columbiaville_25gx2":
            bitrate *= 25
        elif nic == "columbiaville_100g":
            bitrate *= 100

        # /8 converts bits to bytes; the extra 20 bytes per frame are the
        # on-wire overhead (preamble + inter-frame gap).
        return bitrate * num_ports / 8 / (frame_size + 20)
+
    def bind_nic_driver(self, ports, driver=""):
        """
        Bind each port in *ports* to *driver* (or to the port's default
        driver when *driver* is empty), skipping ports already bound to
        the requested driver.
        """
        for port in ports:
            netdev = self.dut.ports_info[port]["port"]
            driver_now = netdev.get_nic_driver()
            driver_new = driver if driver else netdev.default_driver
            if driver_new != driver_now:
                netdev.bind_driver(driver=driver_new)
+
+
def skip_unsupported_pkg(pkgs):
    """
    Skip case which are not supported by the input pkgs

    Decorator factory: accepts a single package-type name or a list of
    names. The wrapped case's first positional argument must be the
    TestCase instance (it provides ``pkg`` with "type" and "version").

    Raises:
        VerifyFailure: if the NIC package info is empty.
        VerifySkip: if the package type matches one of *pkgs*.
    """
    if isinstance(pkgs, str):
        pkgs = [pkgs]

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            test_case = args[0]
            pkg_type = test_case.pkg.get("type")
            pkg_version = test_case.pkg.get("version")
            if not pkg_type or not pkg_version:
                # Bug fix: the original message called .format() with no
                # placeholder, silently dropping the pkg value.
                raise VerifyFailure(
                    "Failed due to pkg {} is empty".format(test_case.pkg)
                )
            for pkg in pkgs:
                if pkg in pkg_type:
                    raise VerifySkip(
                        "{} {} do not support this case".format(pkg_type, pkg_version)
                    )
            return func(*args, **kwargs)

        return wrapper

    return decorator
+
+
def skip_unsupported_nic(nics):
    """
    Skip case which are not supported by the input nics

    Decorator factory: raises VerifySkip when the suite's NIC is one of
    *nics* (a single codename or a list of codenames).
    """
    nic_list = [nics] if isinstance(nics, str) else nics

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            case = args[0]
            if case.nic in nic_list:
                raise VerifySkip("{} do not support this case".format(case.nic))
            return func(*args, **kwargs)

        return wrapper

    return decorator
+
+
def check_supported_nic(nics):
    """
    check if the test case is supported by the input nics

    Decorator factory: only allows the case on NICs listed in *nics*
    (a single codename or a list); every other NIC gets a VerifySkip.
    """
    supported = [nics] if isinstance(nics, str) else nics

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            case = args[0]
            if case.nic not in supported:
                raise VerifySkip("{} do not support this case".format(case.nic))
            return func(*args, **kwargs)

        return wrapper

    return decorator
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 08/23] dts: merge DTS framework/virt_base.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (6 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 07/23] dts: merge DTS framework/test_case.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 09/23] dts: merge DTS framework/virt_common.py " Juraj Linkeš
` (14 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/virt_base.py | 553 +++++++++++++++++++++++++++++++++++++
1 file changed, 553 insertions(+)
create mode 100644 dts/framework/virt_base.py
diff --git a/dts/framework/virt_base.py b/dts/framework/virt_base.py
new file mode 100644
index 0000000000..d4af8b985f
--- /dev/null
+++ b/dts/framework/virt_base.py
@@ -0,0 +1,553 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import os
+import sys
+import threading
+import traceback
+from random import randint
+
+import framework.exception as exception
+import framework.utils as utils
+
+from .config import VIRTCONF, VirtConf
+from .dut import Dut
+from .logger import getLogger
+from .settings import CONFIG_ROOT_PATH
+from .virt_dut import VirtDut
+
+ST_NOTSTART = "NOTSTART"
+ST_PAUSE = "PAUSE"
+ST_RUNNING = "RUNNING"
+ST_UNKNOWN = "UNKNOWN"
+VM_IMG_LIST = []
+mutex_vm_list = threading.Lock()
+
+
+class VirtBase(object):
+ """
+ Basic module for customer special virtual type. This module implement
+ functions configured and composed in the VM boot command. With these
+ function, we can get and set the VM boot command, and instantiate the VM.
+ """
+
+ def __init__(self, dut, vm_name, suite_name):
+ """
+ Initialize the VirtBase.
+ dut: the instance of Dut
+ vm_name: the name of VM which you have configured in the configure
+ suite_name: the name of test suite
+ """
+ self.host_dut = dut
+ self.vm_name = vm_name
+ self.suite = suite_name
+ # indicate whether the current vm is migration vm
+ self.migration_vm = False
+
+ # create self used host session, need close it later
+ self.host_session = self.host_dut.new_session(self.vm_name)
+
+ self.host_logger = self.host_dut.logger
+ # base_dir existed for host dut has prepared it
+ self.host_session.send_expect("cd %s" % self.host_dut.base_dir, "# ")
+
+ # init the host resource pool for VM
+ self.virt_pool = self.host_dut.virt_pool
+
+ if not self.has_virtual_ability():
+ if not self.enable_virtual_ability():
+ raise Exception("Dut [ %s ] cannot have the virtual ability!!!")
+
+ self.virt_type = self.get_virt_type()
+
+ self.params = []
+ self.local_conf = []
+
+ # default call back function is None
+ self.callback = None
+
+ # vm status is running by default, only be changed in internal module
+ self.vm_status = ST_RUNNING
+
+ # by default no special kernel module is required
+ self.def_driver = ""
+ self.driver_mode = ""
+
+ def get_virt_type(self):
+ """
+ Get the virtual type, such as KVM, XEN or LIBVIRT.
+ """
+ raise NotImplementedError
+
    def has_virtual_ability(self):
        """
        Check if the host have the ability of virtualization.
        """
        # NOTE(review): bare `NotImplemented` is a no-op expression, so
        # this base implementation actually returns None (falsy);
        # subclasses are expected to override it. Left as-is because
        # __init__ relies on the falsy return to trigger
        # enable_virtual_ability().
        NotImplemented

    def enable_virtual_ability(self):
        """
        Enable the virtual ability on the DUT.
        """
        # NOTE(review): same as above — returns None unless overridden.
        NotImplemented
+
+ def get_vm_login(self):
+ """
+ Get VM credentials.
+ """
+ raise NotImplementedError
+
+ def add_vm_login(self):
+ """
+ Add VM credentials.
+ """
+ raise NotImplementedError
+
+ def _attach_vm(self):
+ """
+ Attach VM.
+ """
+ raise NotImplementedError
+
+ def _quick_start_vm(self):
+ """
+ Quick start VM.
+ """
+ raise NotImplementedError
+
+ def load_global_config(self):
+ """
+ Load global configure in the path CONFIG_ROOT_PATH.
+ """
+ conf = VirtConf(VIRTCONF)
+ conf.load_virt_config(self.virt_type)
+ global_conf = conf.get_virt_config()
+ for param in global_conf:
+ for key in list(param.keys()):
+ if self.find_option_index(key) is None:
+ self.__save_local_config(key, param[key])
+
+ def set_local_config(self, local_conf):
+ """
+ Configure VM configuration from user input
+ """
+ self.local_conf = local_conf
+
+ def load_local_config(self, suite_name):
+ """
+ Load local configure in the path CONFIG_ROOT_PATH ('DTS_ROOT_PATH/$DTS_CFG_FOLDER/' by default).
+ """
+ # load local configuration by suite and vm name
+ try:
+ conf = VirtConf(CONFIG_ROOT_PATH + os.sep + suite_name + ".cfg")
+ conf.load_virt_config(self.vm_name)
+ self.local_conf = conf.get_virt_config()
+ except:
+ # when met exception in load VM config
+ # just leave local conf untouched
+ pass
+
+ # replace global configurations with local configurations
+ for param in self.local_conf:
+ if "virt_type" in list(param.keys()):
+ # param 'virt_type' is for virt_base only
+ continue
+ # save local configurations
+ for key in list(param.keys()):
+ self.__save_local_config(key, param[key])
+
+ def __save_local_config(self, key, value):
+ """
+ Save the local config into the global dict self.param.
+ """
+ for param in self.params:
+ if key in list(param.keys()):
+ param[key] = value
+ return
+
+ self.params.append({key: value})
+
+ def compose_boot_param(self):
+ """
+ Compose all boot param for starting the VM.
+ """
+ for param in self.params:
+ key = list(param.keys())[0]
+ value = param[key]
+ try:
+ param_func = getattr(self, "add_vm_" + key)
+ if callable(param_func):
+ if type(value) is list:
+ for option in value:
+ param_func(**option)
+ else:
+ print(utils.RED("Virt %s function not callable!!!" % key))
+ except AttributeError:
+ self.host_logger.error(traceback.print_exception(*sys.exc_info()))
+ print(utils.RED("Virt %s function not implemented!!!" % key))
+ except Exception:
+ self.host_logger.error(traceback.print_exception(*sys.exc_info()))
+ raise exception.VirtConfigParamException(key)
+
+ def add_vm_def_driver(self, **options):
+ """
+ Set default driver which may required when setup VM
+ """
+ if "driver_name" in list(options.keys()):
+ self.def_driver = options["driver_name"]
+ if "driver_mode" in list(options.keys()):
+ self.driver_mode = options["driver_mode"]
+
+ def find_option_index(self, option):
+ """
+ Find the boot option in the params which is generated from
+ the global and local configures, and this function will
+ return the index by which option can be indexed in the
+ param list.
+ """
+ index = 0
+ for param in self.params:
+ key = list(param.keys())[0]
+ if key.strip() == option.strip():
+ return index
+ index += 1
+
+ return None
+
+ def generate_unique_mac(self):
+ """
+ Generate a unique MAC based on the DUT.
+ """
+ mac_head = "00:00:00:"
+ mac_tail = ":".join(
+ ["%02x" % x for x in map(lambda x: randint(0, 255), list(range(3)))]
+ )
+ return mac_head + mac_tail
+
+ def get_vm_ip(self):
+ """
+ Get the VM IP.
+ """
+ raise NotImplementedError
+
+ def get_pci_mappings(self):
+ """
+ Get host and VM pass-through device mapping
+ """
+ NotImplemented
+
+ def isalive(self):
+ """
+ Check whether VM existed.
+ """
+ vm_status = self.host_session.send_expect(
+ "ps aux | grep qemu | grep 'name %s '| grep -v grep" % self.vm_name, "# "
+ )
+
+ if self.vm_name in vm_status:
+ return True
+ else:
+ return False
+
+ def load_config(self):
+ """
+ Load configurations for VM
+ """
+ # load global and suite configuration file
+ self.load_global_config()
+ self.load_local_config(self.suite)
+
+ def attach(self):
+ # load configuration
+ self.load_config()
+
+ # change login user/password
+ index = self.find_option_index("login")
+ if index:
+ value = self.params[index]["login"]
+ for option in value:
+ self.add_vm_login(**option)
+
+ # attach real vm
+ self._attach_vm()
+ return None
+
+ def start(self, load_config=True, set_target=True, cpu_topo="", bind_dev=True):
+ """
+ Start VM and instantiate the VM with VirtDut.
+ """
+ try:
+ if load_config is True:
+ self.load_config()
+ # compose boot command for different hypervisors
+ self.compose_boot_param()
+
+ # start virtual machine
+ self._start_vm()
+
+ if self.vm_status is ST_RUNNING:
+ # connect vm dut and init running environment
+ vm_dut = self.instantiate_vm_dut(
+ set_target, cpu_topo, bind_dev=bind_dev, autodetect_topo=True
+ )
+ else:
+ vm_dut = None
+
+ except Exception as vm_except:
+ if self.handle_exception(vm_except):
+ print(utils.RED("Handled exception " + str(type(vm_except))))
+ else:
+ print(utils.RED("Unhandled exception " + str(type(vm_except))))
+
+ if callable(self.callback):
+ self.callback()
+
+ return None
+ return vm_dut
+
+ def quick_start(self, load_config=True, set_target=True, cpu_topo=""):
+ """
+ Only Start VM and not do anything else, will be helpful in multiple VMs
+ """
+ try:
+ if load_config is True:
+ self.load_config()
+ # compose boot command for different hypervisors
+ self.compose_boot_param()
+
+ # start virtual machine
+ self._quick_start_vm()
+
+ except Exception as vm_except:
+ if self.handle_exception(vm_except):
+ print(utils.RED("Handled exception " + str(type(vm_except))))
+ else:
+ print(utils.RED("Unhandled exception " + str(type(vm_except))))
+
+ if callable(self.callback):
+ self.callback()
+
+ def migrated_start(self, set_target=True, cpu_topo=""):
+ """
+ Instantiate the VM after migration done
+ There's no need to load param and start VM because VM has been started
+ """
+ try:
+ if self.vm_status is ST_PAUSE:
+ # flag current vm is migration vm
+ self.migration_vm = True
+ # connect backup vm dut and it just inherited from host
+ vm_dut = self.instantiate_vm_dut(
+ set_target, cpu_topo, bind_dev=False, autodetect_topo=False
+ )
+ except Exception as vm_except:
+ if self.handle_exception(vm_except):
+ print(utils.RED("Handled exception " + str(type(vm_except))))
+ else:
+ print(utils.RED("Unhandled exception " + str(type(vm_except))))
+
+ return None
+
+ return vm_dut
+
+ def handle_exception(self, vm_except):
+ # show exception back trace
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ traceback.print_exception(
+ exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout
+ )
+ if type(vm_except) is exception.ConfigParseException:
+ # nothing to handle just return True
+ return True
+ elif type(vm_except) is exception.VirtConfigParseException:
+ # nothing to handle just return True
+ return True
+ elif type(vm_except) is exception.VirtConfigParamException:
+ # nothing to handle just return True
+ return True
+ elif type(vm_except) is exception.StartVMFailedException:
+ # start vm failure
+ return True
+ elif type(vm_except) is exception.VirtDutConnectException:
+ # need stop vm
+ self._stop_vm()
+ return True
+ elif type(vm_except) is exception.VirtDutInitException:
+ # need close session
+ vm_except.vm_dut.close()
+ # need stop vm
+ self.stop()
+ return True
+ else:
+ return False
+
+ def _start_vm(self):
+ """
+ Start VM.
+ """
+ NotImplemented
+
+ def _stop_vm(self):
+ """
+ Stop VM.
+ """
+ NotImplemented
+
+ def get_vm_img(self):
+ """
+ get current vm img name from params
+ get format like: 10.67.110.11:TestVhostMultiQueueQemu:/home/img/Ub1604.img
+ """
+ param_len = len(self.params)
+ for i in range(param_len):
+ if "disk" in list(self.params[i].keys()):
+ value = self.params[i]["disk"][0]
+ if "file" in list(value.keys()):
+ host_ip = self.host_dut.get_ip_address()
+ return (
+ host_ip
+ + ":"
+ + self.host_dut.test_classname
+ + ":"
+ + value["file"]
+ )
+ return None
+
+ def instantiate_vm_dut(
+ self, set_target=True, cpu_topo="", bind_dev=True, autodetect_topo=True
+ ):
+ """
+ Instantiate the Dut class for VM.
+ """
+ crb = self.host_dut.crb.copy()
+ crb["bypass core0"] = False
+ vm_ip = self.get_vm_ip()
+ crb["IP"] = vm_ip
+ crb["My IP"] = vm_ip
+ username, password = self.get_vm_login()
+ crb["user"] = username
+ crb["pass"] = password
+
+ serializer = self.host_dut.serializer
+
+ try:
+ vm_dut = VirtDut(
+ self,
+ crb,
+ serializer,
+ self.virt_type,
+ self.vm_name,
+ self.suite,
+ cpu_topo,
+ dut_id=self.host_dut.dut_id,
+ )
+ except Exception as vm_except:
+ self.handle_exception(vm_except)
+ raise exception.VirtDutConnectException
+ return None
+
+ vm_dut.nic_type = "any"
+ vm_dut.tester = self.host_dut.tester
+ vm_dut.host_dut = self.host_dut
+ vm_dut.host_session = self.host_session
+ vm_dut.init_log()
+ vm_dut.migration_vm = self.migration_vm
+
+ read_cache = False
+ skip_setup = self.host_dut.skip_setup
+ vm_img = self.get_vm_img()
+ # if current vm is migration vm, skip compile dpdk
+ # if VM_IMG_list include the vm_img, it means the vm have complie the dpdk ok, skip it
+ if self.migration_vm or vm_img in VM_IMG_LIST:
+ skip_setup = True
+ base_dir = self.host_dut.base_dir
+ vm_dut.set_speedup_options(read_cache, skip_setup)
+
+ # package and patch should be set before prerequisites
+ vm_dut.set_package(self.host_dut.package, self.host_dut.patches)
+
+ # base_dir should be set before prerequisites
+ vm_dut.set_directory(base_dir)
+
+ try:
+ # setting up dpdk in vm, must call at last
+ vm_dut.target = self.host_dut.target
+ vm_dut.prerequisites(
+ self.host_dut.package, self.host_dut.patches, autodetect_topo
+ )
+ if set_target:
+ target = self.host_dut.target
+ vm_dut.set_target(target, bind_dev, self.def_driver, self.driver_mode)
+ except:
+ raise exception.VirtDutInitException(vm_dut)
+ return None
+
+ # after prerequisites and set_target, the dpdk compile is ok, add this vm img to list
+ if vm_img not in VM_IMG_LIST:
+ mutex_vm_list.acquire()
+ VM_IMG_LIST.append(vm_img)
+ mutex_vm_list.release()
+
+ self.vm_dut = vm_dut
+ return vm_dut
+
+ def stop(self):
+ """
+ Stop the VM.
+ """
+ self._stop_vm()
+ self.quit()
+
+ self.virt_pool.free_all_resource(self.vm_name)
+
+ def quit(self):
+ """
+ Just quit connection to the VM
+ """
+ if getattr(self, "host_session", None):
+ self.host_session.close()
+ self.host_session = None
+
+ # vm_dut may not init in migration case
+ if getattr(self, "vm_dut", None):
+ if self.vm_status is ST_RUNNING:
+ self.vm_dut.close()
+ else:
+ # when vm is not running, not close session forcely
+ self.vm_dut.close(force=True)
+
+ self.vm_dut.logger.logger_exit()
+ self.vm_dut = None
+
    def register_exit_callback(self, callback):
        """
        Call register exit call back function

        The stored callable is invoked by start()/quick_start() error
        paths after an unhandled exception, before giving up on the VM.
        """
        self.callback = callback
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 09/23] dts: merge DTS framework/virt_common.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (7 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 08/23] dts: merge DTS framework/virt_base.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 10/23] dts: merge DTS framework/virt_dut.py " Juraj Linkeš
` (13 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/virt_common.py | 54 ++++++++++++++++++++++++++++++++++++
1 file changed, 54 insertions(+)
create mode 100644 dts/framework/virt_common.py
diff --git a/dts/framework/virt_common.py b/dts/framework/virt_common.py
new file mode 100644
index 0000000000..eb5ee0667a
--- /dev/null
+++ b/dts/framework/virt_common.py
@@ -0,0 +1,54 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import os
+
+from .config import VirtConf
+from .qemu_kvm import QEMUKvm
+from .qemu_libvirt import LibvirtKvm
+from .settings import CONFIG_ROOT_PATH
+
+
def VM(dut, vm_name, suite_name):
    """Factory returning the hypervisor wrapper configured for *vm_name*.

    Loads ``<suite_name>.cfg`` from CONFIG_ROOT_PATH and dispatches on
    the configured ``virt_type`` parameter.  KVM is assumed when the
    config does not name a type; an unknown type raises ``Exception``.
    """
    virt_conf = VirtConf(CONFIG_ROOT_PATH + os.sep + suite_name + ".cfg")
    virt_conf.load_virt_config(vm_name)

    # The virt config is a list of parameter dicts; the last entry
    # carrying a "virt_type" key wins.  Default virt_type is 'KVM'.
    virt_type = "KVM"
    for param in virt_conf.get_virt_config():
        if "virt_type" in param:
            virt_type = param["virt_type"][0]["virt_type"]

    # Dispatch table instead of an if/elif ladder.
    builder = {"KVM": QEMUKvm, "LIBVIRT": LibvirtKvm}.get(virt_type)
    if builder is None:
        raise Exception("Virt type %s is not supported!" % virt_type)
    return builder(dut, vm_name, suite_name)
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 10/23] dts: merge DTS framework/virt_dut.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (8 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 09/23] dts: merge DTS framework/virt_common.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 11/23] dts: merge DTS framework/virt_resource.py " Juraj Linkeš
` (12 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/virt_dut.py | 463 ++++++++++++++++++++++++++++++++++++++
1 file changed, 463 insertions(+)
create mode 100644 dts/framework/virt_dut.py
diff --git a/dts/framework/virt_dut.py b/dts/framework/virt_dut.py
new file mode 100644
index 0000000000..369abacf37
--- /dev/null
+++ b/dts/framework/virt_dut.py
@@ -0,0 +1,463 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import time
+
+import framework.settings as settings
+from nics.net_device import GetNicObj, RemoveNicObj
+
+from .config import AppNameConf, PortConf
+from .dut import Dut
+from .project_dpdk import DPDKdut
+from .settings import LOG_NAME_SEP, NICS, get_netdev, load_global_setting
+from .utils import RED, parallel_lock
+
+
class VirtDut(DPDKdut):

    """
    A connection to the virtualized CRB (VM) under test.

    This class sends commands to the VM and validates the responses,
    reusing the session handling of DPDKdut/Dut.  It adds VM-specific
    behaviour: port mapping against the host hypervisor's pci pass-through
    table, core-list construction from a requested cpu topology, and
    suite-level port configuration loading.
    """

    def __init__(
        self, hyper, crb, serializer, virttype, vm_name, suite, cpu_topo, dut_id
    ):
        self.vm_ip = crb["IP"]
        self.NAME = "virtdut" + LOG_NAME_SEP + "%s" % self.vm_ip
        # do not create an additional alt_session for the VM
        super(VirtDut, self).__init__(
            crb, serializer, dut_id, self.NAME, alt_session=False
        )
        self.vm_name = vm_name
        self.hyper = hyper              # hypervisor object hosting this VM
        self.cpu_topo = cpu_topo        # e.g. "1S/2C/2T"; "" means flat topology
        self.migration_vm = False       # True for a live-migration target VM

        # load port config from suite cfg
        self.suite = suite

        self.number_of_cores = 0
        self.tester = None
        self.cores = []
        self.architecture = None
        self.ports_map = []
        self.virttype = virttype
        # unique run suffix "<pid>_<timestamp>" for per-run file prefixes
        self.prefix_subfix = (
            str(os.getpid()) + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime())
        )
        self.apps_name_conf = {}
        self.apps_name = {}

    def init_log(self):
        """Attach VM logging to the host DUT's current test suite."""
        if hasattr(self.host_dut, "test_classname"):
            self.logger.config_suite(self.host_dut.test_classname, "virtdut")

    def close(self, force=False):
        """Close the ssh session and drop cached NIC objects for this VM."""
        if self.session:
            self.session.close(force)
            self.session = None
        RemoveNicObj(self)

    def set_nic_type(self, nic_type):
        """
        Set CRB NICS ready to be validated.
        """
        self.nic_type = nic_type
        # vm_dut config will load from vm configuration file

    @parallel_lock()
    def load_portconf(self):
        """
        Load port config for this virtual machine.
        """
        self.conf = PortConf()
        self.conf.load_ports_config(self.vm_name)
        self.ports_cfg = self.conf.get_ports_config()

    @parallel_lock()
    def detect_portmap(self, dut_id):
        """
        Detect port mapping with ping6 messages; locked to protect
        concurrent tester operations.
        """
        # enable tester port ipv6 so the guest's ping6 can reach it
        self.host_dut.enable_tester_ipv6()

        self.map_available_ports()

        # disable tester port ipv6
        self.host_dut.disable_tester_ipv6()

    def load_portmap(self):
        """
        Generate the port mapping based on the loaded port configuration.
        """
        port_num = len(self.ports_info)
        self.ports_map = [-1] * port_num
        for key in list(self.ports_cfg.keys()):
            index = int(key)
            if index >= port_num:
                print(RED("Can not found [%d ]port info" % index))
                continue

            if "peer" in list(self.ports_cfg[key].keys()):
                tester_pci = self.ports_cfg[key]["peer"]
                # find the tester-side index of the configured peer pci
                pci_idx = self.tester.get_local_index(tester_pci)
                self.ports_map[index] = pci_idx

    def set_target(self, target, bind_dev=True, driver_name="", driver_mode=""):
        """
        Set env variables, these have to be set up all the time.  Some tests
        need to compile example apps by themselves and will fail otherwise.
        Set hugepages on the DUT and install modules required by DPDK.
        Configure the default ixgbe PMD function.
        """
        self.set_toolchain(target)

        # set env variables
        # These have to be set up all the time.  Some tests need to compile
        # example apps by themselves and will fail otherwise.
        self.send_expect("export RTE_TARGET=" + target, "#")
        self.send_expect("export RTE_SDK=`pwd`", "#")
        if not self.skip_setup:
            self.build_install_dpdk(target)

        self.setup_memory(hugepages=1024)

        self.setup_modules(target, driver_name, driver_mode)

        if bind_dev:
            self.bind_interfaces_linux(driver_name)

    def prerequisites(self, pkgName, patch, autodetect_topo):
        """
        Prerequisite function; should be called before executing any test
        case.  Scans all lcore information on the DUT, collects nic device
        information via pci scan, and finally sets up the DUT environment
        for validation.
        """
        if not self.skip_setup:
            self.prepare_package()

        out = self.send_expect("cd %s" % self.base_dir, "# ")
        assert "No such file or directory" not in out, "Can't switch to dpdk folder!!!"
        out = self.send_expect("cat VERSION", "# ")
        if "No such file or directory" in out:
            self.logger.error("Can't get DPDK version due to VERSION not exist!!!")
        else:
            self.dpdk_version = out

        self.send_expect("alias ls='ls --color=none'", "#")

        if self.get_os_type() == "freebsd":
            self.send_expect("alias make=gmake", "# ")
            self.send_expect("alias sed=gsed", "# ")

        self.init_core_list()
        self.pci_devices_information()

        # scan ports before restoring interfaces
        self.scan_ports()

        # update with real numa id from the host
        self.update_ports()

        # restore dut ports to kernel
        # if current vm is a migration vm, skip restoring dut ports
        # because some app may already be running there
        if not self.migration_vm:
            if self.virttype != "XEN":
                self.restore_interfaces()
            else:
                self.restore_interfaces_domu()
            # rescan ports after the interfaces are up
            self.rescan_ports()

        # no need to rescan ports for a guest os that just booted up;
        # load port info from the config file
        self.load_portconf()

        self.mount_procfs()

        if self.ports_cfg:
            self.load_portmap()
        else:
            # if no ports are configured in the port config file, auto-detect
            if autodetect_topo:
                self.detect_portmap(dut_id=self.dut_id)

        # print latest ports_info
        for port_info in self.ports_info:
            self.logger.info(port_info)

        # load app name conf
        name_cfg = AppNameConf()
        self.apps_name_conf = name_cfg.load_app_name_conf()

        self.apps_name = self.apps_name_conf["meson"]
        # use the dut target directory instead of the 'target' string in app names
        for app in self.apps_name:
            cur_app_path = self.apps_name[app].replace("target", self.target)
            self.apps_name[app] = cur_app_path + " "

    def init_core_list(self):
        """
        Build self.cores from /proc/cpuinfo.

        When a cpu topology such as "1S/2C/2T" was requested, sockets and
        physical cores are reconstructed from the flat guest cpu list;
        otherwise every processor is its own core on socket 0.
        """
        self.cores = []
        cpuinfo = self.send_expect(
            'grep --color=never "processor"' " /proc/cpuinfo", "#"
        )
        cpuinfo = cpuinfo.split("\r\n")
        if self.cpu_topo != "":
            topo_reg = r"(\d)S/(\d)C/(\d)T"
            m = re.match(topo_reg, self.cpu_topo)
            if m:
                socks = int(m.group(1))
                cores = int(m.group(2))
                threads = int(m.group(3))
                total = socks * cores * threads
                cores_persock = cores * threads
                total_phycores = socks * cores
                # cores should match cpu_topo
                if total != len(cpuinfo):
                    print(RED("Core number not matched!!!"))
                else:
                    for core in range(total):
                        # FIX: use floor division; with "/" these are floats
                        # under Python 3 and str(socket) becomes "0.0".
                        thread = core // total_phycores
                        phy_core = core % total_phycores
                        # if this core is a hyper-thread sibling
                        if thread:
                            idx = core % total_phycores
                            socket = idx // cores
                        else:
                            socket = core // cores

                        # tricky here, socket must be a string
                        self.cores.append(
                            {"thread": core, "socket": str(socket), "core": phy_core}
                        )
                    self.number_of_cores = len(self.cores)
                return

        # default core map: one entry per processor line, all on socket 0
        for line in cpuinfo:
            m = re.search(r"processor\t: (\d+)", line)
            if m:
                thread = m.group(1)
                socket = 0
                core = thread
                self.cores.append({"thread": thread, "socket": socket, "core": core})

        self.number_of_cores = len(self.cores)

    def restore_interfaces_domu(self):
        """
        Restore Linux interfaces for a Xen domU guest.
        """
        for port in self.ports_info:
            pci_bus = port["pci"]
            pci_id = port["type"]
            driver = settings.get_nic_driver(pci_id)
            if driver is not None:
                addr_array = pci_bus.split(":")
                domain_id = addr_array[0]
                bus_id = addr_array[1]
                devfun_id = addr_array[2]
                port = GetNicObj(self, domain_id, bus_id, devfun_id)
                itf = port.get_interface_name()
                self.send_expect("ifconfig %s up" % itf, "# ")
                # give the link time to come up before querying its state
                time.sleep(30)
                print(self.send_expect("ip link ls %s" % itf, "# "))
            else:
                self.logger.info(
                    "NOT FOUND DRIVER FOR PORT (%s|%s)!!!" % (pci_bus, pci_id)
                )

    def pci_devices_information(self):
        # VMs never reuse cached pci info; always rescan
        self.pci_devices_information_uncached()

    def get_memory_channels(self):
        """
        A virtual machine has no memory channel concept, so always return 1.
        """
        return 1

    def check_ports_available(self, pci_bus, pci_id):
        """
        Check whether an auto-scanned port is ready to use.

        The qemu-emulated e1000 management NIC (8086:100e) is excluded.
        """
        if pci_id == "8086:100e":
            return False
        return True
        # load vm port conf need another function
        # need add virtual function device into NICS

    def scan_ports(self):
        """
        Scan port information; a VM always rescans.
        """
        self.scan_ports_uncached()

    def scan_ports_uncached(self):
        """
        Scan ports and collect each port's pci id, mac address and ipv6
        address, dispatching on the guest OS type.
        """
        scan_ports_uncached = getattr(
            self, "scan_ports_uncached_%s" % self.get_os_type()
        )
        return scan_ports_uncached()

    def update_ports(self):
        """
        Update port information (numa node) according to the host pci
        pass-through mapping kept by the hypervisor.
        """
        for port in self.ports_info:
            vmpci = port["pci"]
            for pci_map in self.hyper.pci_maps:
                # search pci mapping structure
                if vmpci == pci_map["guestpci"]:
                    hostpci = pci_map["hostpci"]
                    # search host port info structure
                    for hostport in self.host_dut.ports_info:
                        # update port numa from the matching host PF
                        if hostpci == hostport["pci"]:
                            port["numa"] = hostport["numa"]
                            port["port"].socket = hostport["numa"]
                            break
                        # or from the PF owning this VF
                        if (
                            "sriov_vfs_pci" in hostport
                            and hostpci in hostport["sriov_vfs_pci"]
                        ):
                            port["numa"] = hostport["numa"]
                            port["port"].socket = hostport["numa"]
                            break

    def map_available_ports(self):
        """
        Load or generate the network connection mapping list.
        """
        self.map_available_ports_uncached()
        self.logger.warning("VM DUT PORT MAP: " + str(self.ports_map))

    def map_available_ports_uncached(self):
        """
        Generate the network connection mapping list, preferring configured
        peers and falling back to ping6 auto-detection.
        """
        nrPorts = len(self.ports_info)
        if nrPorts == 0:
            return

        self.ports_map = [-1] * nrPorts

        hits = [False] * len(self.tester.ports_info)

        for vmPort in range(nrPorts):
            vmpci = self.ports_info[vmPort]["pci"]
            peer = self.get_peer_pci(vmPort)
            # if a peer pci is configured, trust it and skip ping6 mapping
            if peer is not None:
                for remotePort in range(len(self.tester.ports_info)):
                    if self.tester.ports_info[remotePort]["pci"] == peer:
                        hits[remotePort] = True
                        self.ports_map[vmPort] = remotePort
                        break
                if self.ports_map[vmPort] == -1:
                    self.logger.error("CONFIGURED TESTER PORT CANNOT FOUND!!!")
                else:
                    continue  # skip ping6 map

            # strip the host pci address of the pass-through device
            hostpci = "N/A"
            for pci_map in self.hyper.pci_maps:
                if vmpci == pci_map["guestpci"]:
                    hostpci = pci_map["hostpci"]
                    break

            # auto ping port map
            for remotePort in range(len(self.tester.ports_info)):
                # for two vfs connected to the same tester port we must
                # skip pinging from devices on the same pf device
                remotepci = self.tester.ports_info[remotePort]["pci"]
                port_type = self.tester.ports_info[remotePort]["type"]
                # IXIA/TRex ports should not be checked for vfs
                if port_type.lower() not in ("ixia", "trex"):
                    remoteport = self.tester.ports_info[remotePort]["port"]
                    vfs = []
                    # vm_dut and tester on the same physical machine
                    host_ip = self.crb["IP"].split(":")[0]
                    if self.crb["tester IP"] == host_ip:
                        vfs = remoteport.get_sriov_vfs_pci()
                    # if hostpci is a vf of the tester port
                    if hostpci == remotepci or hostpci in vfs:
                        print(RED("Skip ping from same PF device"))
                        continue

                ipv6 = self.get_ipv6_address(vmPort)
                if ipv6 == "Not connected":
                    continue

                out = self.tester.send_ping6(
                    remotePort, ipv6, self.get_mac_address(vmPort)
                )

                if out and "64 bytes from" in out:
                    self.logger.info(
                        "PORT MAP: [dut %d: tester %d]" % (vmPort, remotePort)
                    )
                    self.ports_map[vmPort] = remotePort
                    hits[remotePort] = True
                    continue

    def kill_all(self, alt_session=False):
        """
        Kill all dpdk applications running in the VM, using the
        hypervisor's control session to find pids via the rte config file.
        """
        control = getattr(self.hyper, "control_session", None)
        if callable(control):
            out = control("lsof -Fp /var/run/.rte_config")
            pids = []
            pid_reg = r"p(\d+)"
            if len(out):
                lines = out.split("\r\n")
                for line in lines:
                    m = re.match(pid_reg, line)
                    if m:
                        pids.append(m.group(1))
            for pid in pids:
                control("kill -9 %s" % pid)
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 11/23] dts: merge DTS framework/virt_resource.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (9 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 10/23] dts: merge DTS framework/virt_dut.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 12/23] dts: merge DTS framework/virt_scene.py " Juraj Linkeš
` (11 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/virt_resource.py | 584 +++++++++++++++++++++++++++++++++
1 file changed, 584 insertions(+)
create mode 100644 dts/framework/virt_resource.py
diff --git a/dts/framework/virt_resource.py b/dts/framework/virt_resource.py
new file mode 100644
index 0000000000..36b6fe9c71
--- /dev/null
+++ b/dts/framework/virt_resource.py
@@ -0,0 +1,584 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from random import randint
+
+from .utils import RED, get_obj_funcs, parallel_lock
+
+INIT_FREE_PORT = 6000
+INIT_SERIAL_PORT = 7000
+INIT_MIGRATE_PORT = 8000
+INIT_DISPLAY_PORT = 0
+
+QuickScan = True
+
+
class VirtResource(object):

    """
    Bookkeeper for DUT resources shared between VMs: cpu cores, PF/VF
    network devices and host ports.  Allocation state is kept per VM name
    in ``allocated_info``; the used/unused lists mirror each other so an
    index is valid in both.
    """

    def __init__(self, dut):
        self.dut = dut

        self.cores = [int(core["thread"]) for core in dut.cores]
        # initialize unused cores
        self.unused_cores = self.cores[:]
        # initialize used cores (-1 marks a free slot)
        self.used_cores = [-1] * len(self.unused_cores)

        self.ports_info = dut.ports_info
        # initialize unused ports
        self.ports = [port["pci"] for port in dut.ports_info]
        self.unused_ports = self.ports[:]
        # initialize used ports ("unused" marks a free slot)
        self.used_ports = ["unused"] * len(self.unused_ports)

        # initialize vf ports
        self.vfs_info = []
        self.vfs = []
        self.unused_vfs = []
        self.used_vfs = []

        # save allocated cores/ports/vfs keyed by vm name
        self.allocated_info = {}

    def __port_isused(self, pci):
        return pci in self.used_ports

    def __port_used(self, pci):
        index = self.ports.index(pci)
        self.used_ports[index] = pci
        self.unused_ports[index] = "used"

    def __port_unused(self, pci):
        index = self.ports.index(pci)
        self.unused_ports[index] = pci
        self.used_ports[index] = "unused"

    def __port_on_socket(self, pci, socket):
        # socket == -1 means "any socket"
        for port in self.ports_info:
            if port["pci"] == pci:
                if socket == -1:
                    return True

                if port["numa"] == socket:
                    return True
                else:
                    return False

        return False

    def __vf_used(self, pci):
        index = self.vfs.index(pci)
        self.used_vfs[index] = pci
        self.unused_vfs[index] = "used"

    def __vf_unused(self, pci):
        index = self.vfs.index(pci)
        self.used_vfs[index] = "unused"
        self.unused_vfs[index] = pci

    def __core_used(self, core):
        core = int(core)
        index = self.cores.index(core)
        self.used_cores[index] = core
        self.unused_cores[index] = -1

    def __core_unused(self, core):
        core = int(core)
        index = self.cores.index(core)
        self.unused_cores[index] = core
        self.used_cores[index] = -1

    def __core_on_socket(self, core, socket):
        # socket == -1 means "any socket"
        for dut_core in self.dut.cores:
            if int(dut_core["thread"]) == core:
                if socket == -1:
                    return True

                if int(dut_core["socket"]) == socket:
                    return True
                else:
                    return False

        return False

    def __core_isused(self, core):
        index = self.cores.index(core)
        if self.used_cores[index] != -1:
            return True
        else:
            return False

    def reserve_cpu(self, coremask=""):
        """
        Reserve the cpus selected by a hex *coremask* (bit N set means
        core N is reserved, e.g. "e" reserves cores 1-3).
        """
        val = int(coremask, base=16)
        cpus = []
        index = 0
        while val != 0:
            if val & 0x1:
                cpus.append(index)

            val = val >> 1
            index += 1

        for cpu in cpus:
            self.__core_used(cpu)

    @parallel_lock()
    def alloc_cpu(self, vm="", number=-1, socket=-1, corelist=None):
        """
        There are two options for requesting cpu resources for a vm.
        If *number* is not -1, just allocate cpus from the unused cores.
        If *corelist* is not None, allocate those cpus after checking them.
        """
        cores = []

        if vm == "":
            print("Alloc cpu request virtual machine name!!!")
            return cores

        # if the vm already has cores allocated, just return them
        if self.__vm_has_resource(vm, "cores"):
            return self.allocated_info[vm]["cores"]

        if number != -1:
            for core in self.unused_cores:
                if core != -1 and number != 0:
                    if self.__core_on_socket(core, socket) is True:
                        self.__core_used(core)
                        cores.append(str(core))
                        number = number - 1
            if number != 0:
                print("Can't allocated requested cpu!!!")

        if corelist is not None:
            for core in corelist:
                if self.__core_isused(int(core)) is True:
                    print("Core %s has been used!!!" % core)
                else:
                    if self.__core_on_socket(int(core), socket) is True:
                        self.__core_used(int(core))
                        cores.append(core)

        if vm not in self.allocated_info:
            self.allocated_info[vm] = {}

        self.allocated_info[vm]["cores"] = cores
        return cores

    def __vm_has_resource(self, vm, resource=""):
        # with resource == "" just check that the vm is known at all
        if vm == "":
            self.dut.logger.info("VM name can't be NULL!!!")
            raise Exception("VM name can't be NULL!!!")
        if vm not in self.allocated_info:
            self.dut.logger.info("There is no resource allocated to VM [%s]." % vm)
            return False
        if resource == "":
            return True
        if resource not in self.allocated_info[vm]:
            self.dut.logger.info(
                "There is no resource [%s] allocated to VM [%s] " % (resource, vm)
            )
            return False
        return True

    @parallel_lock()
    def free_cpu(self, vm):
        if self.__vm_has_resource(vm, "cores"):
            for core in self.allocated_info[vm]["cores"]:
                self.__core_unused(core)
            self.allocated_info[vm].pop("cores")

    @parallel_lock()
    def alloc_pf(self, vm="", number=-1, socket=-1, pflist=None):
        """
        There are two options for requesting pf devices for a vm.
        If *number* is not -1, just allocate pf devices from the unused pfs.
        If *pflist* is not None, allocate those pf devices after checking them.

        FIX: default changed from a shared mutable ``[]`` to ``None``
        (same behaviour, avoids the mutable-default-argument pitfall).
        """
        ports = []

        if number != -1:
            for pci in self.unused_ports:
                if pci != "unused" and number != 0:
                    if self.__port_on_socket(pci, socket) is True:
                        self.__port_used(pci)
                        ports.append(pci)
                        number = number - 1
            if number != 0:
                print("Can't allocated requested PF devices!!!")

        if pflist is not None:
            for pci in pflist:
                if self.__port_isused(pci) is True:
                    print("Port %s has been used!!!" % pci)
                else:
                    if self.__port_on_socket(pci, socket) is True:
                        self.__port_used(pci)
                        ports.append(pci)

        if vm not in self.allocated_info:
            self.allocated_info[vm] = {}

        self.allocated_info[vm]["ports"] = ports
        return ports

    @parallel_lock()
    def free_pf(self, vm):
        if self.__vm_has_resource(vm, "ports"):
            for pci in self.allocated_info[vm]["ports"]:
                self.__port_unused(pci)
            self.allocated_info[vm].pop("ports")

    @parallel_lock()
    def alloc_vf_from_pf(self, vm="", pf_pci="", number=-1, vflist=None):
        """
        There are two options for requesting vf devices of a pf device.
        If *number* is not -1, just allocate vf devices from the unused vfs.
        If *vflist* is not None, allocate those vf devices after checking them.

        FIX: mutable default ``[]`` replaced with ``None`` sentinel.
        """
        if vflist is None:
            vflist = []
        vfs = []
        if vm == "":
            print("Alloc VF request vitual machine name!!!")
            return vfs

        if pf_pci == "":
            print("Alloc VF request PF pci address!!!")
            return vfs

        for vf_info in self.vfs_info:
            if vf_info["pf_pci"] == pf_pci:
                if vf_info["pci"] in vflist:
                    vfs.append(vf_info["pci"])
                    continue

                if number > 0:
                    vfs.append(vf_info["pci"])
                    number = number - 1

        for vf in vfs:
            self.__vf_used(vf)

        if vm not in self.allocated_info:
            self.allocated_info[vm] = {}

        self.allocated_info[vm]["vfs"] = vfs
        return vfs

    @parallel_lock()
    def free_vf(self, vm):
        if self.__vm_has_resource(vm, "vfs"):
            for pci in self.allocated_info[vm]["vfs"]:
                self.__vf_unused(pci)
            self.allocated_info[vm].pop("vfs")

    @parallel_lock()
    def add_vf_on_pf(self, pf_pci="", vflist=None):
        """
        Add vf devices generated by the specified pf device to the pool.

        FIX: mutable default ``[]`` replaced with ``None`` sentinel.
        """
        if vflist is None:
            vflist = []
        # add new vfs into the vf info list
        vfs = []
        for vf in vflist:
            if vf not in self.vfs:
                self.vfs_info.append({"pci": vf, "pf_pci": pf_pci})
                vfs.append(vf)
        used_vfs = ["unused"] * len(vflist)
        self.unused_vfs += vfs
        self.used_vfs += used_vfs
        self.vfs += vfs

    @parallel_lock()
    def del_vf_on_pf(self, pf_pci="", vflist=None):
        """
        Remove vf devices generated by the specified pf device.

        FIX: mutable default replaced with ``None``; the bare ``except``
        narrowed to ``ValueError`` (the only exception ``list.index``
        raises for a missing element).
        """
        if vflist is None:
            vflist = []
        vfs = []
        for vf in vflist:
            for vfs_info in self.vfs_info:
                if vfs_info["pci"] == vf:
                    vfs.append(vf)

        for vf in vfs:
            try:
                index = self.vfs.index(vf)
            except ValueError:
                continue
            # vfs_info/unused_vfs/used_vfs/vfs grow in lockstep, so one
            # index is valid in all four lists
            del self.vfs_info[index]
            del self.unused_vfs[index]
            del self.used_vfs[index]
            del self.vfs[index]

    @parallel_lock()
    def _check_port_allocated(self, port):
        """
        Check whether a host port has already been pre-allocated to any VM.
        """
        for vm_info in list(self.allocated_info.values()):
            if "hostport" in vm_info and port == vm_info["hostport"]:
                return True
            if "serialport" in vm_info and port == vm_info["serialport"]:
                return True
            if "migrateport" in vm_info and port == vm_info["migrateport"]:
                return True
            # display ports are stored relative to the VNC base port 5900
            if "displayport" in vm_info and port == (vm_info["displayport"] + 5900):
                return True
        return False

    @parallel_lock()
    def alloc_port(self, vm="", port_type="connect"):
        """
        Allocate an unused host port for the vm.

        *port_type* is one of "connect", "serial", "migrate" or "display";
        FIX: an unknown type now raises ValueError instead of crashing
        later with an unbound-local NameError.
        """
        global INIT_FREE_PORT
        global INIT_SERIAL_PORT
        global INIT_MIGRATE_PORT
        global INIT_DISPLAY_PORT

        if vm == "":
            print("Alloc host port request vitual machine name!!!")
            return None

        if port_type == "connect":
            port = INIT_FREE_PORT
        elif port_type == "serial":
            port = INIT_SERIAL_PORT
        elif port_type == "migrate":
            port = INIT_MIGRATE_PORT
        elif port_type == "display":
            port = INIT_DISPLAY_PORT + 5900
        else:
            raise ValueError("Unsupported port type [%s]!!!" % port_type)

        # linear scan for the first port neither occupied on the host
        # nor pre-allocated to another VM
        while True:
            if (
                self.dut.check_port_occupied(port) is False
                and self._check_port_allocated(port) is False
            ):
                break
            else:
                port += 1
                continue

        if vm not in self.allocated_info:
            self.allocated_info[vm] = {}

        if port_type == "connect":
            self.allocated_info[vm]["hostport"] = port
        elif port_type == "serial":
            self.allocated_info[vm]["serialport"] = port
        elif port_type == "migrate":
            self.allocated_info[vm]["migrateport"] = port
        elif port_type == "display":
            port -= 5900
            self.allocated_info[vm]["displayport"] = port

        # do not scan ports from the beginning next time
        if QuickScan:
            if port_type == "connect":
                INIT_FREE_PORT = port
            elif port_type == "serial":
                INIT_SERIAL_PORT = port
            elif port_type == "migrate":
                INIT_MIGRATE_PORT = port
            elif port_type == "display":
                INIT_DISPLAY_PORT = port

        return port

    @parallel_lock()
    def free_port(self, vm):
        if self.__vm_has_resource(vm, "hostport"):
            self.allocated_info[vm].pop("hostport")
        if self.__vm_has_resource(vm, "serialport"):
            self.allocated_info[vm].pop("serialport")
        if self.__vm_has_resource(vm, "migrateport"):
            self.allocated_info[vm].pop("migrateport")
        if self.__vm_has_resource(vm, "displayport"):
            self.allocated_info[vm].pop("displayport")

    @parallel_lock()
    def free_all_resource(self, vm):
        """
        Free all resources the VM has been allocated.
        """
        self.free_port(vm)
        self.free_vf(vm)
        self.free_pf(vm)
        self.free_cpu(vm)

        if self.__vm_has_resource(vm):
            self.allocated_info.pop(vm)

    def get_cpu_on_vm(self, vm=""):
        """
        Return the core list of the specified VM (None if not allocated).
        """
        if vm in self.allocated_info:
            if "cores" in self.allocated_info[vm]:
                return self.allocated_info[vm]["cores"]

    def get_vfs_on_vm(self, vm=""):
        """
        Return the vf device list of the specified VM (None if not allocated).
        """
        if vm in self.allocated_info:
            if "vfs" in self.allocated_info[vm]:
                return self.allocated_info[vm]["vfs"]

    def get_pfs_on_vm(self, vm=""):
        """
        Return the pf device list of the specified VM (None if not allocated).
        """
        if vm in self.allocated_info:
            if "ports" in self.allocated_info[vm]:
                return self.allocated_info[vm]["ports"]
+
+
class simple_dut(object):
    """Minimal DUT stand-in used by the self-test block below.

    Starts with empty core/port inventories for the caller to populate,
    and reports every host port as free.
    """

    def __init__(self):
        # caller fills these in with fake core/port descriptions
        self.ports_info = []
        self.cores = []

    def check_port_occupied(self, port):
        # the fake DUT never has any port in use
        return False
+
+
# Self-test / demo: exercises VirtResource against a fake DUT with a
# fixed 12-thread / 4-port layout.  Run this module directly to inspect
# the allocation behaviour; nothing here touches real hardware.
if __name__ == "__main__":
    dut = simple_dut()
    # 12 cpu threads: 1-6 on socket 0, 7-12 on socket 1
    dut.cores = [
        {"thread": "1", "socket": "0"},
        {"thread": "2", "socket": "0"},
        {"thread": "3", "socket": "0"},
        {"thread": "4", "socket": "0"},
        {"thread": "5", "socket": "0"},
        {"thread": "6", "socket": "0"},
        {"thread": "7", "socket": "1"},
        {"thread": "8", "socket": "1"},
        {"thread": "9", "socket": "1"},
        {"thread": "10", "socket": "1"},
        {"thread": "11", "socket": "1"},
        {"thread": "12", "socket": "1"},
    ]

    # four PF ports: two on numa 0, two on numa 1, each peered to an IXIA port
    dut.ports_info = [
        {
            "intf": "p786p1",
            "source": "cfg",
            "mac": "90:e2:ba:69:e5:e4",
            "pci": "08:00.0",
            "numa": 0,
            "ipv6": "fe80::92e2:baff:fe69:e5e4",
            "peer": "IXIA:6.5",
            "type": "8086:10fb",
        },
        {
            "intf": "p786p2",
            "source": "cfg",
            "mac": "90:e2:ba:69:e5:e5",
            "pci": "08:00.1",
            "numa": 0,
            "ipv6": "fe80::92e2:baff:fe69:e5e5",
            "peer": "IXIA:6.6",
            "type": "8086:10fb",
        },
        {
            "intf": "p787p1",
            "source": "cfg",
            "mac": "90:e2:ba:69:e5:e6",
            "pci": "84:00.0",
            "numa": 1,
            "ipv6": "fe80::92e2:baff:fe69:e5e6",
            "peer": "IXIA:6.7",
            "type": "8086:10fb",
        },
        {
            "intf": "p787p2",
            "source": "cfg",
            "mac": "90:e2:ba:69:e5:e7",
            "pci": "84:00.1",
            "numa": 1,
            "ipv6": "fe80::92e2:baff:fe69:e5e7",
            "peer": "IXIA:6.8",
            "type": "8086:10fb",
        },
    ]

    virt_pool = VirtResource(dut)
    print("Alloc two PF devices on socket 1 from VM")
    print(virt_pool.alloc_pf(vm="test1", number=2, socket=1))

    # register 4 VFs under each socket-0 PF
    virt_pool.add_vf_on_pf(
        pf_pci="08:00.0", vflist=["08:10.0", "08:10.2", "08:10.4", "08:10.6"]
    )
    virt_pool.add_vf_on_pf(
        pf_pci="08:00.1", vflist=["08:10.1", "08:10.3", "08:10.5", "08:10.7"]
    )
    print("Add VF devices to resource pool")
    print(virt_pool.vfs_info)

    # allocate by count and by explicit list
    print("Alloc VF device from resource pool")
    print(virt_pool.alloc_vf_from_pf(vm="test1", pf_pci="08:00.0", number=2))
    print(virt_pool.used_vfs)
    print("Alloc VF device from resource pool")
    print(
        virt_pool.alloc_vf_from_pf(
            vm="test2", pf_pci="08:00.1", vflist=["08:10.3", "08:10.5"]
        )
    )
    print(virt_pool.used_vfs)

    print("Del VF devices from resource pool")
    virt_pool.del_vf_on_pf(pf_pci="08:00.0", vflist=["08:10.4", "08:10.2"])
    print(virt_pool.vfs_info)

    # mask 0xe reserves cores 1, 2 and 3
    virt_pool.reserve_cpu("e")
    print("Reserve three cores from resource pool")
    print(virt_pool.unused_cores)
    print("Alloc two cores on socket1 for VM-test1")
    print(virt_pool.alloc_cpu(vm="test1", number=2, socket=1))
    print("Alloc two cores in list for VM-test2")
    print(virt_pool.alloc_cpu(vm="test2", corelist=["4", "5"]))
    print("Alloc two cores for VM-test3")
    print(virt_pool.alloc_cpu(vm="test3", number=2))
    print("Alloc port for VM-test1")
    print(virt_pool.alloc_port(vm="test1"))
    print("Alloc information after allocated")
    print(virt_pool.allocated_info)

    print("Get cores on VM-test1")
    print(virt_pool.get_cpu_on_vm("test1"))
    print("Get pfs on VM-test1")
    print(virt_pool.get_pfs_on_vm("test1"))
    print("Get vfs on VM-test2")
    print(virt_pool.get_vfs_on_vm("test2"))
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 12/23] dts: merge DTS framework/virt_scene.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (10 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 11/23] dts: merge DTS framework/virt_resource.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 13/23] dts: merge DTS nics/__init__.py " Juraj Linkeš
` (10 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/virt_scene.py | 560 ++++++++++++++++++++++++++++++++++++
1 file changed, 560 insertions(+)
create mode 100644 dts/framework/virt_scene.py
diff --git a/dts/framework/virt_scene.py b/dts/framework/virt_scene.py
new file mode 100644
index 0000000000..63760192c3
--- /dev/null
+++ b/dts/framework/virt_scene.py
@@ -0,0 +1,560 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import time
+
+import framework.utils as utils
+
+from .config import VIRTCONF, VirtConf
+from .exception import *
+from .pmd_output import PmdOutput
+from .qemu_kvm import QEMUKvm
+from .settings import CONFIG_ROOT_PATH, get_netdev
+from .utils import create_mask
+
+# scenario module for handling scenario
+# 1. load configurations
+# config saved in $DTS_CFG_FOLDER/scenarios/name.cfg
+# load configurations will saved in vm list
+# 2. handle special config
+# pf_idx=0,vf_num=2,driver=default;
+# PF0 igb_uio, create 2VFs by default driver
+# 3. create scenario
+# allocate hardware resource for this vm
+# cpu, memory, pf devices, vf devices
+# configuration vm
+# run pre_vm commands
+# create vm
+# run post_vm commands
+
+
class VirtScene(object):
    """Virtualization scenario driver.

    Loads a scenario config ($DTS_CFG_FOLDER/scene/<name>.cfg), allocates
    host resources (CPU cores, PF/VF devices) for each VM described there,
    runs registered pre-/post-VM commands, boots the VMs and tears the
    whole scene down again.
    """

    def __init__(self, dut, tester, scene_name):
        # dut: host DUT object owning the virt resource pool
        # tester: tester DUT object (may be None in self-test mode)
        # scene_name: basename of the scenario config file (without .cfg)
        self.name = scene_name
        self.host_dut = dut
        self.tester_dut = tester
        self.vm_dut = None
        self.pre_cmds = []
        self.post_cmds = []

        self.vm_dut_enable = False
        self.auto_portmap = True
        self.vm_type = "kvm"
        self.def_target = "x86_64-native-linuxapp-gcc"
        self.host_bound = False

        # for vm dut init_log
        self.host_dut.test_classname = "dts"

    def load_config(self):
        """Parse the scenario config file into self.vm_confs.

        self.vm_confs maps each config section name (VM name or the
        special "scene" section) to its parsed parameter list.
        Raises VirtConfigParseException on any parse failure.
        """
        try:
            self.vm_confs = {}
            conf = VirtConf(CONFIG_ROOT_PATH + "/scene/" + self.name + ".cfg")
            self.sections = conf.virt_conf.get_sections()
            for vm in self.sections:
                conf.load_virt_config(vm)
                vm_conf = conf.get_virt_config()
                self.vm_confs[vm] = vm_conf
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit and hides the original parse error — confirm intended.
            raise VirtConfigParseException

    def prepare_vm(self):
        """Handle the "scene" section, then allocate CPU and devices per VM.

        The "scene" section is consumed (popped) here; remaining sections
        are treated as VM names. Host-side DPDK (if configured) is started
        last, after VF devices exist.
        """
        host_cfg = None
        for conf in list(self.vm_confs.keys()):
            if conf == "scene":
                for cfg in self.vm_confs["scene"]:
                    if "suite" in list(cfg.keys()):
                        self.prepare_suite(cfg["suite"])
                    if "host" in list(cfg.keys()):
                        self.host_bound = True
                        host_cfg = cfg["host"][0]
                self.vm_confs.pop("scene")
            else:
                vm_name = conf
                vm_conf = self.vm_confs[vm_name]
                self.prepare_cpu(vm_name, vm_conf)
                self.prepare_devices(vm_conf)
                self.prepare_vmdevice(vm_conf)

        # dpdk should start after vf devices created
        if host_cfg:
            self.prepare_host(**host_cfg)

    def cleanup_vm(self):
        """Destroy generated devices for every VM section.

        Reloads the config first because prepare_* mutated self.vm_confs.
        """
        # reload config for has been changed when handle config
        self.load_config()
        for conf in list(self.vm_confs.keys()):
            if conf != "scene":
                vm_name = conf
                vm_conf = self.vm_confs[vm_name]
                self.cleanup_devices(vm_conf)

    def prepare_suite(self, conf):
        """Apply suite-level options: dut type, vm type and port mapping."""
        for param in conf:
            if "dut" in list(param.keys()):
                if param["dut"] == "vm_dut":
                    self.vm_dut_enable = True
            if "type" in list(param.keys()):
                if param["type"] == "xen":
                    self.vm_type = "xen"
                # not implement yet
                if param["type"] == "vmware":
                    self.vm_type = "vmware"
                # not implement yet
                if param["type"] == "container":
                    self.vm_type = "container"
            if "portmap" in list(param.keys()):
                if param["portmap"] == "cfg":
                    self.auto_portmap = False

    def prepare_host(self, **opts):
        """Bind host ports to the target and optionally start testpmd.

        Required opts: "dpdk" (currently only "testpmd" is acted on) and
        "cores" (whitespace-separated core list). Optional: "target".
        Raises VirtConfigParamException when a required option is missing,
        VirtHostPrepareException when testpmd fails to start.
        """
        if "dpdk" not in list(opts.keys()):
            print(utils.RED("Scenario host parameter request dpdk option!!!"))
            raise VirtConfigParamException("host")

        if "cores" not in list(opts.keys()):
            print(utils.RED("Scenario host parameter request cores option!!!"))
            raise VirtConfigParamException("host")

        if "target" in list(opts.keys()):
            target = opts["target"]
        else:
            target = self.def_target

        self.host_dut.set_target(target, bind_dev=True)

        if opts["dpdk"] == "testpmd":
            self.pmdout = PmdOutput(self.host_dut)
            cores = opts["cores"].split()
            out = self.pmdout.start_testpmd(cores)
            if "Error" in out:
                raise VirtHostPrepareException()

    def prepare_cpu(self, vm_name, conf):
        """Allocate CPU cores for one VM and rewrite its "cpu" config entry.

        Honors skipcores (reserved cores), numa ("auto" uses port 0's
        socket), number (default 2), model (default "host"), cpu_topo and
        cpu_pin (explicit core list wins over number/socket allocation).
        """
        cpu_param = {}
        for params in conf:
            if "cpu" in list(params.keys()):
                cpu_conf = params["cpu"][0]
                break

        if "skipcores" in list(cpu_conf.keys()):
            cpus = cpu_conf["skipcores"].split()
            # remove invalid configured core
            # NOTE(review): removing from `cpus` while iterating it can skip
            # adjacent invalid entries — confirm inputs make this harmless.
            for cpu in cpus:
                if int(cpu) not in self.host_dut.virt_pool.cores:
                    cpus.remove(cpu)
            # create core mask for reserved cores
            core_mask = create_mask(cpus)
            # reserve those skipped cores
            self.host_dut.virt_pool.reserve_cpu(core_mask)

        if "numa" in list(cpu_conf.keys()):
            if cpu_conf["numa"] == "auto":
                numa = self.host_dut.ports_info[0]["port"].socket
            else:
                numa = int(cpu_conf["numa"])
        else:
            numa = 0

        if "number" in list(cpu_conf.keys()):
            num = int(cpu_conf["number"])
        else:
            num = 2

        if "model" in list(cpu_conf.keys()):
            model = cpu_conf["model"]
        else:
            model = "host"

        cpu_topo = ""
        if "cpu_topo" in list(cpu_conf.keys()):
            cpu_topo = cpu_conf["cpu_topo"]

        pin_cores = []
        if "cpu_pin" in list(cpu_conf.keys()):
            pin_cores = cpu_conf["cpu_pin"].split()

        if len(pin_cores):
            cores = self.host_dut.virt_pool.alloc_cpu(vm=vm_name, corelist=pin_cores)
        else:
            cores = self.host_dut.virt_pool.alloc_cpu(
                vm=vm_name, number=num, socket=numa
            )
        core_cfg = ""
        for core in cores:
            core_cfg += "%s " % core
        core_cfg = core_cfg[:-1]

        cpu_param["number"] = num
        cpu_param["model"] = model
        cpu_param["cpupin"] = core_cfg
        cpu_param["cputopo"] = cpu_topo

        # replace with allocated cpus
        # NOTE(review): `params` is the last loop variable (the entry that
        # contained "cpu" thanks to the break above) — relies on Python's
        # loop-variable leakage.
        params["cpu"] = [cpu_param]

    def prepare_devices(self, conf):
        """Create SR-IOV VFs for every "dev_gen" entry, then drop the entry."""
        for params in conf:
            if "dev_gen" in list(params.keys()):
                index = conf.index(params)
                for param in params["dev_gen"]:
                    self.handle_dev_gen(**param)
                # remove handled 'dev_gen' configuration
                conf.remove(conf[index])

    def cleanup_devices(self, conf):
        """Destroy SR-IOV VFs for every "dev_gen" entry in the config."""
        for params in conf:
            if "dev_gen" in list(params.keys()):
                for param in params["dev_gen"]:
                    self.handle_dev_destroy(**param)

    def prepare_vmdevice(self, conf):
        """Resolve vf_idx/pf_idx device entries to pci-assign parameters
        and bind the underlying host netdevs to pci-stub for passthrough."""
        for params in conf:
            if "device" in list(params.keys()):
                for param in params["device"]:
                    if "vf_idx" in list(param.keys()):
                        new_param = self.prepare_vf_conf(param)
                        index = params["device"].index(param)
                        params["device"][index] = new_param
                    elif "pf_idx" in list(param.keys()):
                        new_param = self.prepare_pf_conf(param)
                        index = params["device"].index(param)
                        params["device"][index] = new_param

                for param in params["device"]:
                    netdev = get_netdev(self.host_dut, param["opt_host"])
                    if netdev is not None:
                        netdev.bind_driver("pci-stub")

    def prepare_pf_conf(self, param):
        """Translate a pf_idx entry into a pci-assign device parameter dict.

        Raises VirtDeviceCreateException when pf_idx is out of range.
        """
        pf_param = {}
        # strip pf pci id
        pf = int(param["pf_idx"])
        if pf >= len(self.host_dut.ports_info):
            raise VirtDeviceCreateException
        pf_pci = self.host_dut.ports_info[pf]["pci"]
        pf_param["driver"] = "pci-assign"
        pf_param["opt_host"] = pf_pci
        if param["guestpci"] != "auto":
            pf_param["opt_addr"] = param["guestpci"]

        return pf_param

    def prepare_vf_conf(self, param):
        """Translate a vf_idx entry into a pci-assign device parameter dict.

        Requires "pf_dev" in param; optionally sets the VF MAC on the PF.
        Raises VirtDeviceCreateException when vf_idx is out of range.
        """
        vf_param = {}
        # strip vf pci id
        if "pf_dev" in list(param.keys()):
            pf = int(param["pf_dev"])
            pf_net = self.host_dut.ports_info[pf]["port"]
            vfs = self.host_dut.ports_info[pf]["vfs_port"]
            vf_idx = int(param["vf_idx"])
            if vf_idx >= len(vfs):
                raise VirtDeviceCreateException
            vf_pci = vfs[vf_idx].pci
            vf_param["driver"] = "pci-assign"
            vf_param["opt_host"] = vf_pci
            if param["guestpci"] != "auto":
                vf_param["opt_addr"] = param["guestpci"]
            if "mac" in list(param.keys()):
                pf_net.set_vf_mac_addr(vf_idx, param["mac"])
        else:
            print(utils.RED("Invalid vf device config, request pf_dev"))

        return vf_param

    def reset_pf_cmds(self, port):
        """Register a post-VM host command bringing the PF interface up
        (skipped when the host side is bound to DPDK)."""
        command = {}
        command["type"] = "host"
        if not self.host_bound:
            intf = self.host_dut.ports_info[port]["intf"]
            command["command"] = "ifconfig %s up" % intf
            self.reg_postvm_cmds(command)

    def handle_dev_gen(self, **opts):
        """Create VFs on the PF given by opts["pf_idx"].

        vf_num defaults to 1; raises VirtDeviceCreateException on failure.
        """
        if "pf_idx" in list(opts.keys()):
            port = int(opts["pf_idx"])
            if "vf_num" in list(opts.keys()):
                vf_num = int(opts["vf_num"])
            else:
                print(utils.RED("No vf_num for port %d, assum one VF" % port))
                vf_num = 1
            if "driver" in list(opts.keys()):
                driver = opts["driver"]

            # NOTE(review): if "driver" is absent, `driver` is unbound here;
            # the resulting NameError is masked by the bare except below and
            # reported as a device-creation failure — confirm intended.
            try:
                print(utils.GREEN("create vf %d %d %s" % (port, vf_num, driver)))
                self.host_dut.generate_sriov_vfs_by_port(port, vf_num, driver)
                self.reset_pf_cmds(port)
            except:
                print(utils.RED("Failed to create vf as requested!!!"))
                raise VirtDeviceCreateException

    def handle_dev_destroy(self, **opts):
        """Destroy all VFs on the PF given by opts["pf_idx"] (best effort)."""
        if "pf_idx" in list(opts.keys()):
            port = int(opts["pf_idx"])

            try:
                print(utils.GREEN("destroy vfs on port %d" % port))
                self.host_dut.destroy_sriov_vfs_by_port(port)
            except:
                print(utils.RED("Failed to destroy vf as requested!!!"))

    def reg_prevm_cmds(self, command):
        """
        Register a command to run before the VMs are started.

        command: {'type':'host/tester/vm',
                     define which crb command progress
                  'command':'XXX',
                     command send to crb
                  'expect':'XXX',
                     expected output for command
                  'timeout': 60,
                  'verify': True or False
                     check whether command ran successfully
                 }
        """
        self.pre_cmds.append(command)

    def run_pre_cmds(self):
        """Run all registered pre-VM commands on the selected crb.

        Raises VirtVmOperationException when a command returns a nonzero
        integer status.
        """
        for cmd in self.pre_cmds:
            if cmd["type"] == "vm":
                # NOTE(review): "vm" only prints a warning; `crb` then keeps
                # its value from the previous iteration (NameError if this is
                # the first command) — confirm "vm" pre-commands are rejected
                # earlier.
                print(utils.RED("Can't run vm command when vm not ready"))
            elif cmd["type"] == "host":
                crb = self.host_dut
            elif cmd["type"] == "tester":
                crb = self.tester_dut
            else:
                crb = self.host_dut

            if "expect" not in list(cmd.keys()):
                expect = "# "
            else:
                expect = cmd["expect"]

            if "verify" not in list(cmd.keys()):
                verify = False
            else:
                verify = cmd["verify"]

            if "timeout" not in list(cmd.keys()):
                timeout = 5
            else:
                timeout = cmd["timeout"]

            ret = crb.send_expect(
                cmd["command"], expect, timeout=timeout, verify=verify
            )

            if type(ret) is int and ret != 0:
                print(utils.RED("Failed to run command %s" % cmd["command"]))
                raise VirtVmOperationException

    def reg_postvm_cmds(self, command):
        """
        Register a command to run after the VMs are started.

        command: {'type':'host/tester/vm',
                     define which crb command progress
                  'command':'XXX',
                     command send to crb
                  'expect':'XXX',
                     expected output for command
                  'verify':'yes or no'
                     check whether command ran successfully
        """
        self.post_cmds.append(command)
        pass

    def run_post_cmds(self):
        """Run all registered post-VM commands ("vm" type runs on vm_dut).

        Raises VirtVmOperationException when a command returns a nonzero
        integer status.
        """
        for cmd in self.post_cmds:
            if cmd["type"] == "vm":
                crb = self.vm_dut
            elif cmd["type"] == "host":
                crb = self.host_dut
            elif cmd["type"] == "tester":
                crb = self.tester_dut
            else:
                crb = self.host_dut

            if "expect" not in list(cmd.keys()):
                expect = "# "
            else:
                expect = cmd["expect"]

            if "verify" not in list(cmd.keys()):
                verify = False
            else:
                verify = cmd["verify"]

            if "timeout" not in list(cmd.keys()):
                timeout = 5
            else:
                timeout = cmd["timeout"]

            ret = crb.send_expect(
                cmd["command"], expect, timeout=timeout, verify=verify
            )

            if type(ret) is int and ret != 0:
                print(utils.RED("Failed to run command %s" % cmd["command"]))
                raise VirtVmOperationException

    def merge_params(self, vm, params):
        """Merge scenario params into the VM's default params (scenario
        entries override by option name) and refresh the VM name entry."""
        for param in params:
            index = vm.find_option_index(list(param.keys())[0])
            if index is not None:
                vm.params[index] = param
            else:
                vm.params.append(param)
        index = vm.find_option_index("name")
        # update vm name
        vm.params[index]["name"][0]["name"] = vm.vm_name

    def get_cputopo(self, params):
        """Return the "cputopo" string from the first "cpu" entry.

        Returns None implicitly when no "cpu" entry exists.
        """
        for param in params:
            if "cpu" in list(param.keys()):
                cpu_topo = param["cpu"][0]["cputopo"]
                return cpu_topo

    def start_vms(self):
        """Boot every configured VM (KVM only) and record it in self.vms.

        Each entry of self.vms maps vm_name -> QEMUKvm object and
        vm_name + "_session" -> the VM's dut session. Failures are printed
        and the VM is skipped.
        """
        self.vms = []
        if self.vm_type == "kvm":
            for vm_name in list(self.vm_confs.keys()):
                # tricky here, QEMUKvm based on suite and vm name
                # suite is virt_global, vm_name just the type
                vm = QEMUKvm(self.host_dut, self.vm_type.upper(), "virt_global")
                vm.load_config()
                vm.vm_name = vm_name
                vm.set_vm_default()
                # merge default config and scene config
                scene_params = self.vm_confs[vm_name]
                # reload merged configurations
                self.merge_params(vm, scene_params)
                # get cpu topo
                topo = self.get_cputopo(scene_params)
                try:
                    vm_dut = vm.start(
                        load_config=False, set_target=False, cpu_topo=topo
                    )
                    if vm_dut is None:
                        raise Exception("Set up VM ENV failed!")

                    vm_info = {}
                    vm_info[vm_name] = vm
                    vm_info[vm_name + "_session"] = vm_dut
                    self.vms.append(vm_info)

                except Exception as e:
                    print(utils.RED("Failure for %s" % str(e)))

    def get_vm_duts(self):
        """Return the list of dut session objects of all started VMs."""
        duts = []
        for vm_info in self.vms:
            for vm_obj in list(vm_info.keys()):
                if "session" in vm_obj:
                    duts.append(vm_info[vm_obj])

        return duts

    def create_scene(self):
        """Full bring-up: prepare resources, pre-cmds, boot VMs, post-cmds."""
        self.prepare_vm()
        self.run_pre_cmds()
        self.start_vms()
        self.run_post_cmds()
        pass

    def set_target(self, target):
        """Set the DPDK build target on every started VM session."""
        for vm_info in self.vms:
            for vm_obj in list(vm_info.keys()):
                if "session" in vm_obj:
                    vm_info[vm_obj].set_target(target)

    def destroy_scene(self):
        """Tear down: close VM sessions, stop VMs, free generated devices."""
        for vm_info in self.vms:
            for vm_obj in list(vm_info.keys()):
                if "session" in vm_obj:
                    vm_info[vm_obj].kill_all()
                    vm_info[vm_obj].close()
                    vm_info[vm_obj].logger.logger_exit()
            for vm_obj in list(vm_info.keys()):
                if "session" not in vm_obj:
                    vm_info[vm_obj].stop()
                    vm_info[vm_obj] = None
        self.cleanup_vm()
+
+
+if __name__ == "__main__":
+
+ class QEMUKvmTmp:
+ def __init__(self, dut, vm_name, suite_name):
+ print(vm_name)
+ print(suite_name)
+
+ def start(self):
+ print(self.__dict__)
+ return True
+
+ QEMUKvm = QEMUKvmTmp
+
+ class simple_dev(object):
+ def __init__(self, pci):
+ self.pci = pci
+ self.socket = 1
+
+ emu_dev1 = simple_dev("00:00.1")
+ emu_dev2 = simple_dev("00:00.2")
+ emu_dev3 = simple_dev("00:00.3")
+ emu_dev4 = simple_dev("00:00.4")
+
+ class simple_dut(object):
+ def __init__(self):
+ self.ports_info = [
+ {"vfs_port": [emu_dev1, emu_dev2]},
+ {"vfs_port": [emu_dev3, emu_dev4]},
+ ]
+ self.virt_pool = simple_resource()
+
+ def send_expect(
+ self, cmds, expected, timeout=5, alt_session=False, verify=False
+ ):
+ print(cmds + "---" + expected)
+
+ class simple_resource(object):
+ def __init__(self):
+ pass
+
+ def reserve_cpu(self, coremask):
+ print("reserve " + coremask)
+
+ def alloc_cpu(self, vm="", number=-1, socket=-1, corelist=None):
+ print("alloc %s num %d on socket %d" % (vm, number, socket))
+
+ dut = simple_dut()
+ scene = VirtScene(dut, None, "vf_passthrough")
+ scene.load_config()
+ scene.create_scene()
+ scene.destroy_scene()
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 13/23] dts: merge DTS nics/__init__.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (11 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 12/23] dts: merge DTS framework/virt_scene.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 14/23] dts: merge DTS nics/system_info.py " Juraj Linkeš
` (9 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/nics/__init__.py | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
create mode 100644 dts/nics/__init__.py
diff --git a/dts/nics/__init__.py b/dts/nics/__init__.py
new file mode 100644
index 0000000000..ae0043b7ef
--- /dev/null
+++ b/dts/nics/__init__.py
@@ -0,0 +1,30 @@
+#!/usr/bin/python3
+# BSD LICENSE
+#
+# Copyright (c) 2021 PANTHEON.tech s.r.o.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of PANTHEON.tech s.r.o. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 14/23] dts: merge DTS nics/system_info.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (12 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 13/23] dts: merge DTS nics/__init__.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 15/23] dts: merge DTS framework/flow/__init__.py " Juraj Linkeš
` (8 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/nics/system_info.py | 144 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 144 insertions(+)
create mode 100644 dts/nics/system_info.py
diff --git a/dts/nics/system_info.py b/dts/nics/system_info.py
new file mode 100644
index 0000000000..5ecdd77dd6
--- /dev/null
+++ b/dts/nics/system_info.py
@@ -0,0 +1,144 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import time
+from collections import OrderedDict
+
+# install GitPython
+from git import Repo
+
+
class SystemInfo(object):
    """Collect host system and NIC information over a DUT shell session.

    Uses dmidecode/lsb_release/uname/gcc for system facts and sysfs,
    ethtool and lspci for NIC facts of the given PCI device.
    """

    def __init__(self, dut, pci_device_id):
        # dut: DUT object providing a shell session via dut.session
        # pci_device_id: PCI address (e.g. "0000:08:00.0") of the NIC
        self.dut = dut
        self.pci_device_id = pci_device_id
        self.session = self.dut.session
        self.system_info = OrderedDict()
        self.nic_info = OrderedDict()

    def get_system_info(self):
        """Return an ordered dict of board/CPU/memory/OS/kernel/GCC facts.

        Requires root (dmidecode) on the remote side.
        """

        board = self.session.send_expect("dmidecode -s system-product-name", "# ")
        self.system_info["Board"] = board

        processors = self.session.send_expect("dmidecode -s processor-version", "# ")
        # Keep only the first processor line (sockets report duplicates).
        processor = processors.split("\r\n")[0]
        self.system_info["CPU"] = processor

        memories = self.session.send_expect("dmidecode -t memory", "]# ")
        channels, size, speed = self._strip_memory(memories)
        memory_info = "Total %d MBs in %d channels @ %s" % (size, channels, speed)
        self.system_info["Memory"] = memory_info

        release = self.session.send_expect(
            "lsb_release -d |awk -F':' '{print $2}'", "# "
        )
        self.system_info["Operating system"] = release

        kernel = self.session.send_expect("uname -r", "# ")
        self.system_info["Linux kernel version"] = kernel

        gcc_info = self.session.send_expect("gcc --version", "# ")
        gcc = gcc_info.split("\r\n")[0]
        self.system_info["GCC version"] = gcc

        return self.system_info

    def _strip_memory(self, memories):
        """
        Parse `dmidecode -t memory` output.

        Returns (channel_count, total_size_mb, speed_of_first_dimm).
        Example input lines:
        Size: 8192 MB    Locator: DIMM_A1    Speed: 2133 MHz
        """
        s_regex = r"(\s+)Size: (\d+) MB"
        s1_regex = r"(\s+)Size: (\d+) GB"
        l_regex = r"(\s+)Locator: .*_(\w+)"
        speed_regex = r"(\s+)Speed: (.*)"
        size = ""
        locate = ""
        speed = "Unknown"
        memory_infos = []
        memory_channel = set()
        lines = memories.split("\r\n")
        total_size = 0
        for line in lines:
            m = re.match(s_regex, line)
            m1 = re.match(s1_regex, line)
            if m:
                size = m.group(2)
            if m1:
                # Normalize GB-reported DIMMs to MB.
                size = int(m1.group(2)) * 1024
            l_m = re.match(l_regex, line)
            if l_m:
                locate = l_m.group(2)
            s_m = re.match(speed_regex, line)
            if s_m:
                speed = s_m.group(2)
            # A Speed line other than "Unknown" closes one populated DIMM
            # record; empty slots report "Unknown" and are skipped.
            if speed != "Unknown":
                memory = {"Size": size, "Locate": locate, "Speed": speed}
                memory_infos.append(memory)
                speed = "Unknown"
                total_size += int(size)
                # Channel is identified by the first letter of the locator
                # suffix (e.g. "A" from DIMM_A1).
                memory_channel.add(locate[0])

        return len(memory_channel), total_size, memory_infos[0]["Speed"]

    def get_nic_info(self):
        """Return an ordered dict with nic_name, device_id (vendor:device)
        and firmware-version for self.pci_device_id.

        Returns None when any sysfs/ethtool/lspci lookup fails
        ("No such" in the command output).
        """

        cmd = "cat /sys/bus/pci/devices/%s/vendor" % self.pci_device_id
        vendor = self.session.send_expect(cmd, "# ")
        if "No such" in vendor:
            return None

        cmd = "cat /sys/bus/pci/devices/%s/device" % self.pci_device_id
        device = self.session.send_expect(cmd, "# ")
        if "No such" in device:
            return None

        cmd = "ls --color=never /sys/bus/pci/devices/%s/net" % self.pci_device_id
        interface = self.session.send_expect(cmd, "# ")
        if "No such" in interface:
            return None
        cmd = (
            "ethtool -i %s | grep --color=never firmware |awk -F':' '{print $2}'"
            % interface
        )
        firmware = self.session.send_expect(cmd, "# ")
        if "No such" in firmware:
            return None
        cmd = (
            "lspci -vmmks %s |grep -i ^device |awk -F':' '{print $2}'"
            % self.pci_device_id
        )
        self.nic_info["nic_name"] = self.session.send_expect(cmd, "# ")
        # sysfs values are "0x8086"-style; strip the 0x prefix.
        self.nic_info["device_id"] = vendor[2:] + ":" + device[2:]
        self.nic_info["firmware-version"] = firmware
        return self.nic_info
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 15/23] dts: merge DTS framework/flow/__init__.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (13 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 14/23] dts: merge DTS nics/system_info.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 16/23] dts: merge DTS framework/flow/enums.py " Juraj Linkeš
` (7 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/flow/__init__.py | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 dts/framework/flow/__init__.py
diff --git a/dts/framework/flow/__init__.py b/dts/framework/flow/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 16/23] dts: merge DTS framework/flow/enums.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (14 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 15/23] dts: merge DTS framework/flow/__init__.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 17/23] dts: merge DTS framework/flow/exceptions.py " Juraj Linkeš
` (6 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/flow/enums.py | 122 ++++++++++++++++++++++++++++++++++++
1 file changed, 122 insertions(+)
create mode 100644 dts/framework/flow/enums.py
diff --git a/dts/framework/flow/enums.py b/dts/framework/flow/enums.py
new file mode 100644
index 0000000000..4e08ac2ace
--- /dev/null
+++ b/dts/framework/flow/enums.py
@@ -0,0 +1,122 @@
+# BSD LICENSE
+#
+# Copyright(c) 2020 Intel Corporation. All rights reserved.
+# Copyright © 2018[, 2019] The University of New Hampshire. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from enum import Enum
+
+
class FlowRuleType(Enum):
    """Direction attribute of an rte_flow rule as spelled in testpmd syntax."""

    INGRESS = "ingress"
    EGRESS = "egress"
    # Empty string: rule applies to both directions (no attribute emitted).
    BOTH = ""
+
+
class FlowItemType(Enum):
    """rte_flow pattern item names; values match testpmd token spelling."""

    UDP = "udp"
    TCP = "tcp"
    SCTP = "sctp"
    IPV4 = "ipv4"
    IPV6 = "ipv6"
    END = "end"
    VOID = "void"
    INVERT = "invert"
    ANY = "any"
    RAW = "raw"
    ETH = "eth"
    VLAN = "vlan"
    VXLAN = "vxlan"
    GRE = "gre"
    VXLAN_GPE = "vxlan_gpe"
    ARP_ETH_IPV4 = "arp_eth_ipv4"
    ICMP = "icmp"
    ICMP6 = "icmp6"
    MARK = "mark"
    META = "meta"
    TAG = "tag"
    FUZZY = "fuzzy"
+
+
class FlowActionType(Enum):
    """rte_flow action names; values match testpmd token spelling."""

    # "Simple" actions that don't need parameters
    VOID = "void"
    PASSTHRU = "passthru"
    FLAG = "flag"
    DROP = "drop"
    COUNT = "count"
    MAC_SWAP = "mac_swap"
    DEC_TTL = "dec_ttl"

    # Actions that do need parameters
    JUMP = "jump"
    MARK = "mark"
    QUEUE = "queue"
    RSS = "rss"
    PF = "pf"
    VF = "vf"
    PHY_PORT = "phy_port"
    PORT_ID = "port_id"
    METER = "meter"
    SECURITY = "security"
    OF_SET_MPLS_TTL = "of_set_mpls_ttl"
    OF_DEC_MPLS_TTL = "of_dec_mpls_ttl"
    OF_SET_NW_TTL = "of_set_nw_ttl"
    OF_DEC_NW_TTL = "of_dec_nw_ttl"
    OF_COPY_TTL_OUT = "of_copy_ttl_out"
    OF_COPY_TTL_IN = "of_copy_ttl_in"
    OF_POP_VLAN = "of_pop_vlan"
    OF_PUSH_VLAN = "of_push_vlan"
    OF_SET_VLAN_VID = "of_set_vlan_vid"
    OF_SET_VLAN_PCP = "of_set_vlan_pcp"
    OF_POP_MPLS = "of_pop_mpls"
    OF_PUSH_MPLS = "of_push_mpls"
    VXLAN_ENCAP = "vxlan_encap"
    VXLAN_DECAP = "vxlan_decap"
    NVGRE_ENCAP = "nvgre_encap"
    NVGRE_DECAP = "nvgre_decap"
    RAW_ENCAP = "raw_encap"
    RAW_DECAP = "raw_decap"
    SET_IPV4_SRC = "set_ipv4_src"
    SET_IPV4_DST = "set_ipv4_dst"
    SET_IPV6_SRC = "set_ipv6_src"
    SET_IPV6_DST = "set_ipv6_dst"
    SET_TP_SRC = "set_tp_src"
    SET_TP_DST = "set_tp_dst"
    SET_TTL = "set_ttl"
    SET_MAC_SRC = "set_mac_src"
    SET_MAC_DST = "set_mac_dst"
    INC_TCP_SEQ = "inc_tcp_seq"
    DEC_TCP_SEQ = "dec_tcp_seq"
    INC_TCP_ACK = "inc_tcp_ack"
    DEC_TCP_ACK = "dec_tcp_ack"
    SET_TAG = "set_tag"
    SET_META = "set_meta"
    SET_IPV4_DSCP = "set_ipv4_dscp"
    SET_IPV6_DSCP = "set_ipv6_dscp"
    AGE = "age"
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 17/23] dts: merge DTS framework/flow/exceptions.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (15 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 16/23] dts: merge DTS framework/flow/enums.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 18/23] dts: merge DTS framework/flow/flow.py " Juraj Linkeš
` (5 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/flow/exceptions.py | 44 ++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
create mode 100644 dts/framework/flow/exceptions.py
diff --git a/dts/framework/flow/exceptions.py b/dts/framework/flow/exceptions.py
new file mode 100644
index 0000000000..cf261307a8
--- /dev/null
+++ b/dts/framework/flow/exceptions.py
@@ -0,0 +1,44 @@
+# BSD LICENSE
+#
+# Copyright(c) 2020 Intel Corporation. All rights reserved.
+# Copyright © 2018[, 2019] The University of New Hampshire. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
class CompositionException(Exception):
    """Base error for failures while composing flow items into a flow.

    BUGFIX: the original assigned ``self.message`` but never initialized the
    ``Exception`` base, so ``str(exc)`` and tracebacks carried no detail.
    The zero-argument call used by existing code still works.
    """

    def __init__(self, message: str = "There was an unexpected error in composition"):
        # Passing the message to Exception makes it appear in str()/repr().
        super().__init__(message)
        self.message = message
+
+
class InvalidFlowItemException(CompositionException):
    """Raised when one flow item cannot accept another as its successor.

    @param first_item: the item already in the flow.
    @param second_item: the item that could not be appended.
    @param flow: the flow being composed, if available, for a richer message.
    """

    def __init__(self, first_item, second_item, flow=None):
        if flow is not None:
            self.message = (
                f'"{first_item}" was not able to accept "{second_item}" '
                f"as the next item in flow {flow}."
            )
        else:
            self.message = f'"{first_item}" was not able to accept "{second_item}".'
        # BUGFIX: initialize the Exception machinery directly (the parent's
        # __init__ takes no message argument) so str(exc) carries the message.
        Exception.__init__(self, self.message)
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 18/23] dts: merge DTS framework/flow/flow.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (16 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 17/23] dts: merge DTS framework/flow/exceptions.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:18 ` [RFC PATCH v1 19/23] dts: merge DTS framework/flow/flow_action_items.py " Juraj Linkeš
` (4 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/flow/flow.py | 223 +++++++++++++++++++++++++++++++++++++
1 file changed, 223 insertions(+)
create mode 100644 dts/framework/flow/flow.py
diff --git a/dts/framework/flow/flow.py b/dts/framework/flow/flow.py
new file mode 100644
index 0000000000..e80e69a991
--- /dev/null
+++ b/dts/framework/flow/flow.py
@@ -0,0 +1,223 @@
+# BSD LICENSE
+#
+# Copyright(c) 2020 Intel Corporation. All rights reserved.
+# Copyright © 2018[, 2019] The University of New Hampshire. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import annotations
+
+import copy
+import itertools
+import operator
+from functools import reduce
+from typing import FrozenSet, Iterable, List, Tuple, Union
+
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+
+from .enums import FlowActionType, FlowItemType
+from .exceptions import InvalidFlowItemException
+from .flow_action_items import ActionFlowItem
+from .flow_items import FlowItem
+from .flow_pattern_items import TUNNELING_PROTOCOLS, PatternFlowItem
+
# Packet intended to never match any flow rule (negative control).
# NOTE(review): the original comment said "reserved mac addresses", but
# src/dst are empty strings — confirm scapy's rendering of Ether(src="",
# dst="") is what the test harness expects.
NEVER_MATCH_PACKET = Ether(src="", dst="") / Raw("\x00" * 64)
+
+
+def _iterable_deep_compare(i1, i2):
+ return reduce(lambda x, y: x and y, map(lambda x, y: x == y, i1, i2), True)
+
+
def expand_pattern_list_with_iterable_replacing_item(
    patterns: List[Iterable["FlowItem"]],
    it: Iterable[Tuple["FlowItem", FrozenSet[str], FrozenSet[str], str]],
    item,
):
    """For each pattern containing *item*, substitute every test dataset.

    Each pattern in *patterns* that contains *item* is split around it and
    re-emitted once per dataset yielded by *it*, as
    ``([items-before, dataset[0], items-after], *dataset[1:])``.
    Patterns that do not contain *item* are yielded unchanged at the end.

    This is a generator so the (potentially large) expansion is never fully
    materialized in memory.

    Example:
        patterns=[['a', 'b', 'c'], ['c', 'b', 'a']], it=[('X', 'p')], item='b'
        yields (['a', 'X', 'c'], 'p') then (['c', 'X', 'a'], 'p').
    """
    # Split each pattern that contains the item into (prefix, suffix).
    split_patterns = [
        (pattern[: pattern.index(item)], pattern[pattern.index(item) + 1 :])
        for pattern in patterns
        if item in pattern
    ]

    # BUGFIX: the original tee'd the iterator len(patterns) times and then
    # consumed *every* teed copy while processing the *first* split pattern,
    # so its expansions were duplicated and all later split patterns saw
    # exhausted iterators (yielding nothing). Pair exactly one independent
    # copy with each split pattern instead.
    iterators = itertools.tee(it, len(split_patterns))
    for (prefix, suffix), iterator in zip(split_patterns, iterators):
        for dataset in iterator:
            # Deep-copy so a caller mutating one yielded flow item cannot
            # affect later expansions sharing the same dataset object.
            dataset_copy = copy.deepcopy(dataset)
            yield ([*prefix, dataset_copy[0], *suffix], *dataset_copy[1:])

    # Patterns without the item pass through unchanged.
    yield from (pattern for pattern in patterns if item not in pattern)
+
+
class Flow(object):
    """An rte_flow rule under construction: pattern items plus action items.

    Flows compose with ``/`` like scapy packets; composition always returns
    a new Flow and never mutates its operands.
    """

    # Actions applied to matched packets.
    action_items: List[ActionFlowItem]
    # Match pattern, in protocol order (e.g. eth / ipv4 / udp).
    pattern_items: List[PatternFlowItem]
    # NOTE(review): declared but never assigned anywhere in this class —
    # confirm whether entry_points is still used.
    entry_points: FrozenSet[FlowItemType]

    def __init__(self, action_items=None, pattern_items=None):
        # None sentinels keep the mutable default lists per-instance.
        self.action_items = [] if action_items is None else action_items
        self.pattern_items = [] if pattern_items is None else pattern_items

    def __truediv__(self, item: Union[FlowItem, Flow]) -> "Flow":
        """Compose with another flow or flow item, scapy-style.

        @param item: The flow or flow item to append.
        @return: A new Flow containing both operands.
        @raise InvalidFlowItemException: if a pattern item cannot legally
            follow the current pattern, or an action conflicts with one
            already present.
        @raise TypeError: for unsupported operand types.
        """
        if isinstance(item, Flow):
            return Flow(
                pattern_items=[*self.pattern_items, *item.pattern_items],
                action_items=[*self.action_items, *item.action_items],
            )
        if isinstance(item, PatternFlowItem):
            # An empty pattern accepts any first item; otherwise the last
            # item must list the new one among its valid successors.
            if (
                not self.pattern_items
                or item.type in self.pattern_items[-1].valid_next_items
            ):
                return Flow(
                    pattern_items=[*self.pattern_items, item],
                    action_items=[*self.action_items],
                )
            raise InvalidFlowItemException(self.pattern_items[-1], item, flow=self)
        if isinstance(item, ActionFlowItem):
            # Every existing action must tolerate the new action's type.
            for action in self.action_items:
                if item.type not in action.allowed_with:
                    raise InvalidFlowItemException(action, item, flow=self)
            return Flow(
                pattern_items=[*self.pattern_items],
                action_items=[*self.action_items, item],
            )
        # BUGFIX: previously this fell off the end and implicitly returned
        # None for unsupported operand types; fail loudly instead.
        raise TypeError(f"Cannot compose a Flow with {type(item).__name__}")

    def __str__(self):
        # BUGFIX: this was an f-string that was then %-formatted; the
        # f-prefix was spurious. The rendered testpmd rule is unchanged.
        pattern = " / ".join(str(item) for item in self.pattern_items) + " / end"
        return f"ingress pattern {pattern} actions queue index 1 / end"

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        # Length checks come first: _iterable_deep_compare stops at the
        # shorter iterable, so equal prefixes of different lengths would
        # otherwise compare equal.
        return (
            isinstance(other, Flow)
            and len(self.action_items) == len(other.action_items)
            and len(self.pattern_items) == len(other.pattern_items)
            and _iterable_deep_compare(self.pattern_items, other.pattern_items)
            and _iterable_deep_compare(self.action_items, other.action_items)
        )

    def to_scapy_packet(self):
        """Render the pattern items as a composed scapy packet (actions are ignored)."""
        return reduce(
            operator.truediv,
            (item.to_scapy_packet() for item in self.pattern_items),
        )

    def get_test_property_flows(
        self, pattern_item_types_to_update=None, action_item_types_to_update=None
    ) -> Iterable[Flow]:
        """Yield (Flow, *test-properties) tuples expanded from this flow.

        Expansion replaces either the first tunnelling protocol in the
        pattern, or (if none) the final pattern item, with each dataset
        from that item's property stream.
        """
        # BUGFIX: the old if/elif chain could leave one of the two arguments
        # as None (e.g. when both were omitted). Normalize independently.
        # NOTE(review): both arguments are currently unused below — confirm
        # whether that is intended.
        if pattern_item_types_to_update is None:
            pattern_item_types_to_update = [self.pattern_items[-1]]
        if action_item_types_to_update is None:
            action_item_types_to_update = []

        # Deep-copy so mutating this object before the generator finishes
        # cannot change what it yields.
        base_pattern_items = copy.deepcopy(self.pattern_items)
        base_action_items = copy.deepcopy(self.action_items)

        test_flows: Iterable[Iterable[FlowItem]] = [base_pattern_items]

        tunnelling_protocols = [
            i for i in base_pattern_items if type(i) in TUNNELING_PROTOCOLS
        ]
        # Expand around the first tunnelling protocol if present, otherwise
        # around the final pattern item (matching the original behavior,
        # which took the non-tunnel target from self, not the copy).
        if tunnelling_protocols:
            expansion_target = tunnelling_protocols[0]
        else:
            expansion_target = self.pattern_items[-1]
        test_flows = expand_pattern_list_with_iterable_replacing_item(
            [*test_flows],
            expansion_target.get_property_stream(),
            expansion_target,
        )
        for pattern in test_flows:
            yield Flow(
                pattern_items=pattern[0], action_items=base_action_items
            ), *pattern[1:]
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 19/23] dts: merge DTS framework/flow/flow_action_items.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (17 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 18/23] dts: merge DTS framework/flow/flow.py " Juraj Linkeš
@ 2022-04-06 15:18 ` Juraj Linkeš
2022-04-06 15:19 ` [RFC PATCH v1 20/23] dts: merge DTS framework/flow/flow_items.py " Juraj Linkeš
` (3 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:18 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/flow/flow_action_items.py | 1597 +++++++++++++++++++++++
1 file changed, 1597 insertions(+)
create mode 100644 dts/framework/flow/flow_action_items.py
diff --git a/dts/framework/flow/flow_action_items.py b/dts/framework/flow/flow_action_items.py
new file mode 100644
index 0000000000..1542bd9225
--- /dev/null
+++ b/dts/framework/flow/flow_action_items.py
@@ -0,0 +1,1597 @@
+# BSD LICENSE
+#
+# Copyright(c) 2020 Intel Corporation. All rights reserved.
+# Copyright © 2018[, 2019] The University of New Hampshire. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from typing import Dict, FrozenSet, Tuple
+
+from .enums import FlowActionType
+from .flow_items import FlowItem
+
# Actions that may appear alongside any other action in a single rule.
ALWAYS_ALLOWED_ACTIONS = {FlowActionType.VOID}

# Action types accepted as the start of an action list.
# NOTE(review): FlowActionType.METER is absent even though FlowActionMeter
# is defined below — confirm whether that omission is intentional.
ENTRY_POINTS = {
    FlowActionType.VOID,
    FlowActionType.PASSTHRU,
    FlowActionType.FLAG,
    FlowActionType.DROP,
    FlowActionType.COUNT,
    FlowActionType.MAC_SWAP,
    FlowActionType.DEC_TTL,
    FlowActionType.JUMP,
    FlowActionType.MARK,
    FlowActionType.QUEUE,
    FlowActionType.RSS,
    FlowActionType.PF,
    FlowActionType.VF,
    FlowActionType.PHY_PORT,
    FlowActionType.PORT_ID,
    FlowActionType.SECURITY,
    FlowActionType.OF_SET_MPLS_TTL,
    FlowActionType.OF_DEC_MPLS_TTL,
    FlowActionType.OF_SET_NW_TTL,
    FlowActionType.OF_DEC_NW_TTL,
    FlowActionType.OF_COPY_TTL_OUT,
    FlowActionType.OF_COPY_TTL_IN,
    FlowActionType.OF_POP_VLAN,
    FlowActionType.OF_PUSH_VLAN,
    FlowActionType.OF_SET_VLAN_VID,
    FlowActionType.OF_SET_VLAN_PCP,
    FlowActionType.OF_POP_MPLS,
    FlowActionType.OF_PUSH_MPLS,
    FlowActionType.VXLAN_ENCAP,
    FlowActionType.VXLAN_DECAP,
    FlowActionType.NVGRE_ENCAP,
    FlowActionType.NVGRE_DECAP,
    FlowActionType.RAW_ENCAP,
    FlowActionType.RAW_DECAP,
    FlowActionType.SET_IPV4_SRC,
    FlowActionType.SET_IPV4_DST,
    FlowActionType.SET_IPV6_SRC,
    FlowActionType.SET_IPV6_DST,
    FlowActionType.SET_TP_SRC,
    FlowActionType.SET_TP_DST,
    FlowActionType.SET_TTL,
    FlowActionType.SET_MAC_SRC,
    FlowActionType.SET_MAC_DST,
    FlowActionType.INC_TCP_SEQ,
    FlowActionType.DEC_TCP_SEQ,
    FlowActionType.INC_TCP_ACK,
    FlowActionType.DEC_TCP_ACK,
    FlowActionType.SET_TAG,
    FlowActionType.SET_META,
    FlowActionType.SET_IPV4_DSCP,
    FlowActionType.SET_IPV6_DSCP,
    FlowActionType.AGE,
}
+
+
class ActionFlowItem(FlowItem):
    """Base class for rte_flow action items.

    By default an action is compatible with, and may be followed by, every
    action type; subclasses narrow these sets and supply their test cases.
    """

    # Action types this action can coexist with in one rule.
    allowed_with: FrozenSet[FlowActionType] = frozenset(FlowActionType)

    # Action types that may follow this one in an action list.
    valid_next_items: FrozenSet[FlowActionType] = frozenset(FlowActionType)

    # Test name -> (testpmd flow rule, frozensets of packet strings);
    # presumably (matching packets, non-matching packets) — see subclasses.
    test_case: Dict[str, Tuple[str, frozenset, frozenset]] = {}
+
+
class FlowActionVoid(ActionFlowItem):
    # Tests the "void" action: (testpmd rule, packets with the matched src,
    # packets with other srcs).
    type = FlowActionType.VOID

    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions void / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionPassthru(ActionFlowItem):
    # Tests the "passthru" action.
    type = FlowActionType.PASSTHRU
    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions passthru / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionFlag(ActionFlowItem):
    # Tests the "flag" action.
    type = FlowActionType.FLAG
    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions flag / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionDrop(ActionFlowItem):
    # Tests the "drop" action.
    type = FlowActionType.DROP
    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions drop / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionCount(ActionFlowItem):
    """Tests the "count" action in its shared and per-rule id variants."""

    type = FlowActionType.COUNT
    test_case = {
        # (testpmd rule, packets with the matched src, packets with other srcs)
        "test_shared": (
            "ingress pattern eth / ipv4 src is 192.168.0.1"
            " / udp / end actions count shared 0 id 1 / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    # BUGFIX: the 10.0.30.99 and 8.8.8.8 entries used '\x00'
                    # (an actual NUL byte embedded in the scapy expression)
                    # while every other packet string in this module uses the
                    # escaped '\\x00'.
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
        "test_id": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions count id 1 / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionMac_swap(ActionFlowItem):
    # Tests the "mac_swap" action; packets carry explicit src/dst MACs.
    type = FlowActionType.MAC_SWAP

    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions mac_swap / end",
            frozenset(
                {
                    'Ether(src="90:61:ae:fd:41:43", dst = "ab:cd:ef:12:34:56") '
                    "/ IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"
                }
            ),
            frozenset(
                {
                    'Ether(src="90:61:ae:fd:41:43", dst = "ab:cd:ef:12:34:56") '
                    "/ IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    'Ether(src="90:61:ae:fd:41:43", dst = "ab:cd:ef:12:34:56") '
                    "/ IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    'Ether(src="90:61:ae:fd:41:43", dst = "ab:cd:ef:12:34:56") '
                    "/ IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    'Ether(src="90:61:ae:fd:41:43", dst = "ab:cd:ef:12:34:56") '
                    "/ IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionDec_ttl(ActionFlowItem):
    # Tests the "dec_ttl" action; packets carry an explicit IPv4 ttl.
    type = FlowActionType.DEC_TTL

    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions dec_ttl / end",
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.1\", ttl = 128) / UDP() / Raw('\\x00' * 64)"
                }
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\", ttl = 128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\", ttl = 128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\", ttl = 128 ) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\", ttl = 128) / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionJump(ActionFlowItem):
    # Tests the "jump" action, targeting group 1.
    type = FlowActionType.JUMP

    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions jump group 1 / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionMark(ActionFlowItem):
    # Tests the "mark" action with id 0xABCDEF.
    type = FlowActionType.MARK
    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 "
            "/ udp / end actions mark id 0xABCDEF / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionQueue(ActionFlowItem):
    # Tests the "queue" action, directing matches to queue index 1.
    type = FlowActionType.QUEUE
    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions queue index 1 / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionRss(ActionFlowItem):
    # No test_case here: the "rss" action is covered by its own test suite.
    type = FlowActionType.RSS

    # RSS already has a test suite.
    """
    test_case = {
        'case1': ('ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions / end',
        frozenset({"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}),
        frozenset({"Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
        "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
        "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
        "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)"})),
    }
    """
+
+
class FlowActionPf(ActionFlowItem):
    # Tests the "pf" action (redirect to the physical function).
    type = FlowActionType.PF
    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions pf / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionVf(ActionFlowItem):
    """Tests the "vf" action in its "original" and "id" variants."""

    type = FlowActionType.VF
    test_case = {
        "test_original": (
            # BUGFIX: the rule read "original 1/ end" with no space before the
            # token separator, unlike every other rule in this module.
            "ingress pattern eth / ipv4 src is 192.168.0.1 /"
            " udp / end actions vf original 1 / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    # BUGFIX: these two used '\x00' (a literal NUL byte) while
                    # every other packet string here uses the escaped '\\x00'.
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
        "test_id": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions vf id 1 / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionPhy_port(ActionFlowItem):
    # Tests the "phy_port" action.
    type = FlowActionType.PHY_PORT

    test_case = {
        # original port index
        # NOTE(review): testpmd's phy_port syntax takes "original {boolean}";
        # "original / end" supplies no value — confirm this parses.
        "test_original": (
            "ingress pattern eth / ipv4 src is 192.168.0.1"
            " / udp / end actions phy_port original / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
        # physical port index
        "test_index": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 "
            "/ udp / end actions phy_port index 1 / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionPort_id(ActionFlowItem):
    # Tests the "port_id" action.
    type = FlowActionType.PORT_ID

    test_case = {
        # original DPDK port ID
        # NOTE(review): testpmd's port_id syntax takes "original {boolean}";
        # "original / end" supplies no value — confirm this parses.
        "test_original": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 "
            "/ udp / end actions port_id original / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
        # DPDK port ID
        "test_id": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 "
            "/ udp / end actions port_id id 1 / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionMeter(ActionFlowItem):
    # Tests the "meter" action with mtr_id 1.
    # NOTE(review): FlowActionType.METER is not listed in ENTRY_POINTS — confirm.
    type = FlowActionType.METER
    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions meter mtr_id 1 / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionSecurity(ActionFlowItem):
    # Tests the "security" action with security_session 1.
    type = FlowActionType.SECURITY
    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1"
            " / udp / end actions security security_session 1 / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionOf_set_mpls_ttl(ActionFlowItem):
    # Tests the "of_set_mpls_ttl" action; packets carry an MPLS header.
    type = FlowActionType.OF_SET_MPLS_TTL

    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 "
            "/ udp / end actions of_set_mpls_ttl mpls_ttl 64 / end",
            frozenset(
                {
                    'Ether() / IP(src="192.168.0.1") / MPLS(label = 0xab, ttl=128)'
                    " / UDP() / Raw('\\x00' * 64)"
                }
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)",
                    'Ether() / IP(src="132.177.0.99") / MPLS(label = 0xab, ttl=128)'
                    " / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionOf_dec_mpls_ttl(ActionFlowItem):
    # Tests the "of_dec_mpls_ttl" action; packets carry an MPLS header.
    type = FlowActionType.OF_DEC_MPLS_TTL

    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions of_dec_mpls_ttl / end",
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.1\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)"
                }
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionOf_set_nw_ttl(ActionFlowItem):
    # Tests the "of_set_nw_ttl" action; packets carry an explicit IPv4 ttl.
    type = FlowActionType.OF_SET_NW_TTL

    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 "
            "/ udp / end actions of_set_nw_ttl nw_ttl 64 / end",
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.1\", ttl=128) / UDP() / Raw('\\x00' * 64)"
                }
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\", ttl=128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\", ttl=128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\", ttl=128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\", ttl=128) / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionOf_dec_nw_ttl(ActionFlowItem):
    # Tests the "of_dec_nw_ttl" action; packets carry an explicit IPv4 ttl.
    type = FlowActionType.OF_DEC_NW_TTL
    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions of_dec_nw_ttl / end",
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.1\", ttl=128) / UDP() / Raw('\\x00' * 64)"
                }
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\", ttl=128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\", ttl=128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\", ttl=128) / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\", ttl=128) / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionOf_copy_ttl_out(ActionFlowItem):
    # Tests the "of_copy_ttl_out" action.
    type = FlowActionType.OF_COPY_TTL_OUT

    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 "
            "/ udp / end actions of_copy_ttl_out / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionOf_copy_ttl_in(ActionFlowItem):
    """Tests the "of_copy_ttl_in" action."""

    type = FlowActionType.OF_COPY_TTL_IN

    test_case = {
        "test": (
            # BUGFIX: the rule was copy-pasted from FlowActionOf_copy_ttl_out
            # and still exercised "of_copy_ttl_out"; it now tests the action
            # this class is actually for.
            "ingress pattern eth / ipv4 src is 192.168.0.1 "
            "/ udp / end actions of_copy_ttl_in / end",
            frozenset(
                {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
            ),
            frozenset(
                {
                    "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
                    "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
class FlowActionOf_pop_vlan(ActionFlowItem):
    # Tests the "of_pop_vlan" action; packets carry a Dot1Q VLAN tag.
    type = FlowActionType.OF_POP_VLAN

    test_case = {
        "test": (
            "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions of_pop_vlan / end",
            frozenset(
                {
                    'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="192.168.0.1") '
                    "/ UDP() / Raw('\\x00' * 64)"
                }
            ),
            frozenset(
                {
                    'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="192.168.0.2") '
                    "/ UDP() / Raw('\\x00' * 64)",
                    'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="10.0.30.99") '
                    "/ UDP() / Raw('\\x00' * 64)",
                    'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="8.8.8.8") '
                    "/ UDP() / Raw('\\x00' * 64)",
                    'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="132.177.0.99")'
                    " / UDP() / Raw('\\x00' * 64)",
                }
            ),
        ),
    }
+
+
+class FlowActionOf_push_vlan(ActionFlowItem):
+ type = FlowActionType.OF_PUSH_VLAN
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1"
+ " / udp / end actions of_push_vlan ethertype 0x8100 / end",
+ frozenset(
+ {
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="192.168.0.1") '
+ "/ UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="192.168.0.2") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="10.0.30.99") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="8.8.8.8") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="132.177.0.99")'
+ " / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionOf_set_vlan_vid(ActionFlowItem):
+ type = FlowActionType.OF_SET_VLAN_VID
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions of_set_vlan_vid vlan_vid 0xbbb / end",
+ frozenset(
+ {
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="192.168.0.1")'
+ " / UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="192.168.0.2") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="10.0.30.99") '
+ "/ UDP() / Raw('\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="8.8.8.8") '
+ "/ UDP() / Raw('\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="132.177.0.99") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionOf_set_vlan_pcp(ActionFlowItem):
+ type = FlowActionType.OF_SET_VLAN_PCP
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1"
+ " / udp / end actions of_set_vlan_vid vlan_pcp 0x7 / end",
+ frozenset(
+ {
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="192.168.0.1") '
+ "/ UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="192.168.0.2") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="10.0.30.99") '
+ "/ UDP() / Raw('\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="8.8.8.8") '
+ "/ UDP() / Raw('\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / IP(src="132.177.0.99") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionOf_pop_mpls(ActionFlowItem):
+ type = FlowActionType.OF_POP_MPLS
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions of_pop_mpls ethertype 0x0806 / end",
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.1\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionOf_push_mpls(ActionFlowItem):
+ type = FlowActionType.OF_PUSH_MPLS
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1"
+ " / udp / end actions of_push_mpls ethertype 0x0806 / end",
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.1\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / MPLS(label = 0xab, ttl=128) / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionVxlan_encap(ActionFlowItem):
+ type = FlowActionType.VXLAN_ENCAP
+
+ test_case = {
+ # VXLAN encap definition is the VNI?
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1"
+ " / udp / end actions vxlan_encap definition 0x112233 / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionVxlan_decap(ActionFlowItem):
+ type = FlowActionType.VXLAN_DECAP
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions vxlan_decap / end",
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.1\") / UDP() / VXLAN() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / VXLAN() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / VXLAN() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / VXLAN() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / VXLAN() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionNvgre_encap(ActionFlowItem):
+ type = FlowActionType.NVGRE_ENCAP
+ # NVGRE PACKETS NOT SUPPORTED BY SCAPY.
+ """
+ test_case = {
+ 'test': ('ingress pattern eth / ipv4 src is 192.168.0.1
+ / udp / end actions nvgre_encap definition 0x112233 / end',
+ frozenset({"Ether() / IP(src=\"192.168.0.1\") / UDP() / NVGRE() / Raw('\\x00' * 64)"}),
+ frozenset({"Ether() / IP(src=\"192.168.0.2\") / UDP() / NVGRE() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / NVGRE() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / NVGRE() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / NVGRE() / Raw('\\x00' * 64)"})),
+ }
+ """
+
+
+class FlowActionNvgre_decap(ActionFlowItem):
+ type = FlowActionType.NVGRE_DECAP
+ # NVGRE PACKETS NOT SUPPORTED BY SCAPY.
+ """
+ test_case = {
+ 'test': ('ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end actions nvgre_decap / end',
+ frozenset({"Ether() / IP(src=\"192.168.0.1\") / UDP() / NVGRE() / Raw('\\x00' * 64)"}),
+ frozenset({"Ether() / IP(src=\"192.168.0.2\") / UDP() / NVGRE() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / NVGRE() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / NVGRE() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / NVGRE() / Raw('\\x00' * 64)"})),
+ }
+ """
+
+
+class FlowActionRaw_encap(ActionFlowItem):
+ type = FlowActionType.RAW_ENCAP
+ # Assume we are encapsulating with a VLAN header with the following values:
+ # TPID: 0x8100
+ # Prio: 0x5
+ # PCP: 0
+ # VID: 0xaaa
+ # This makes the full header: 0x8100aaaa
+ test_case = {
+ "test_data": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions raw_encap data 0x8100aaaa / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ "test_preserve": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions raw_encap data 0x8100aaaa preserve 0xffffffff / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ # Is "size" in bits or bytes? Unclear in documentation, defaulting to bits.
+ "test_size": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions raw_encap data 0x8100aaaa size 32 / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionRaw_decap(ActionFlowItem):
+ type = FlowActionType.RAW_DECAP
+ test_case = {
+ "test_data": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions raw_decap data 0x8100aaaa / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xbbb) / IP(src="192.168.0.2")'
+ " / UDP() / Raw('\\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xbbb) / IP(src="10.0.30.99") '
+ "/ UDP() / Raw('\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xbbb) / IP(src="8.8.8.8")'
+ " / UDP() / Raw('\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xbbb) / IP(src="132.177.0.99") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ # Is "size" in bits or bytes? Unclear in documentation, defaulting to bits.
+ "test_size": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions raw_decap data 0x8100aaaa size 32 / end",
+ frozenset(
+ {
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xbbb) / IP(src="192.168.0.1") '
+ "/ UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xbbb) / IP(src="192.168.0.2") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xbbb) / IP(src="10.0.30.99")'
+ " / UDP() / Raw('\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xbbb) / IP(src="8.8.8.8") '
+ "/ UDP() / Raw('\x00' * 64)",
+ 'Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xbbb) / IP(src="132.177.0.99") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_ipv4_src(ActionFlowItem):
+ type = FlowActionType.SET_IPV4_SRC
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions set_ipv4_src ipv4_addr 172.16.0.10 / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_ipv4_dst(ActionFlowItem):
+ type = FlowActionType.SET_IPV4_DST
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 dst is 192.168.0.1"
+ " / udp / end actions set_ipv4_dst ipv4_addr 172.16.0.10 / end",
+ frozenset(
+ {"Ether() / IP(dst=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(dst=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(dst=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(dst=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(dst=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_ipv6_src(ActionFlowItem):
+ type = FlowActionType.SET_IPV6_SRC
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv6 src is 2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c2 "
+ "/ udp / end actions set_ipv6_src ipv6_addr 2001:0000:9d38:6ab8:1c48:9999:aaaa:bbbb",
+ frozenset(
+ {
+ 'Ether() / IPv6(src="2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c2") '
+ "/ UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ "Ether() / IPv6(src=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c3\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IPv6(src=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c4\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IPv6(src=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c5\") / UDP() / Raw('\x00' * 64)",
+ 'Ether() / IPv6(src="2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c6") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_ipv6_dst(ActionFlowItem):
+ type = FlowActionType.SET_IPV6_DST
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv6 dst is 2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c2 "
+ "/ udp / end actions set_ipv6_dst ipv6_addr 2001:0000:9d38:6ab8:1c48:9999:aaaa:bbbb",
+ frozenset(
+ {
+ 'Ether() / IPv6(dst="2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c2")'
+ " / UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ "Ether() / IPv6(dst=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c3\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IPv6(dst=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c4\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IPv6(dst=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c5\") / UDP() / Raw('\x00' * 64)",
+ 'Ether() / IPv6(dst="2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c6") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_tp_src(ActionFlowItem):
+ type = FlowActionType.SET_TP_SRC
+
+ test_case = {
+ # UDP
+ "test_udp": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1"
+ " / udp / end actions set_tp_src port 1998 / end",
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.1\") UDP(sport=3838) / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") UDP(sport=3838) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") UDP(sport=3838) / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") UDP(sport=3838) / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") UDP(sport=3838) / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ # TCP
+ "test_tcp": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 / tcp / end actions set_tp_src port 1998 / end",
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.1\") TCP(sport=3838) / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") TCP(sport=3838) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") TCP(sport=3838) / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") TCP(sport=3838) / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") TCP(sport=3838) / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_tp_dst(ActionFlowItem):
+ type = FlowActionType.SET_TP_DST
+
+ test_case = {
+ # UDP
+ "test_udp": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions set_tp_dst port 1998 / end",
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.1\") UDP(dport=3838) / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") UDP(dport=3838) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") UDP(dport=3838) / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") UDP(dport=3838) / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") UDP(dport=3838) / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ # TCP
+ "test_tcp": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 / tcp / end actions set_tp_dst port 1998 / end",
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.1\") TCP(dport=3838) / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") TCP(dport=3838) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") TCP(dport=3838) / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") TCP(dport=3838) / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") TCP(dport=3838) / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_ttl(ActionFlowItem):
+ type = FlowActionType.SET_TTL
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1"
+ " / udp / end actions set_ttl ttl_value 64 / end",
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.1\" , ttl=128 ) / UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\" , ttl=128 ) / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\" , ttl=128 ) / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\", ttl=128 ) / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\", ttl=128 ) / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_mac_src(ActionFlowItem):
+ type = FlowActionType.SET_MAC_SRC
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1"
+ " / udp / end actions set_mac_src mac_addr 10:20:30:40:50:60 / end",
+ frozenset(
+ {
+ 'Ether(src="90:61:ae:fd:41:43") / IP(src="192.168.0.1") / UDP() / Raw(\'\\x00\' * 64)'
+ }
+ ),
+ frozenset(
+ {
+ 'Ether(src="90:61:ae:fd:41:43") / IP(src="192.168.0.2") / UDP() / Raw(\'\\x00\' * 64)',
+ 'Ether(src="90:61:ae:fd:41:43") / IP(src="10.0.30.99") / UDP() / Raw(\'\\x00\' * 64)',
+ 'Ether(src="90:61:ae:fd:41:43") / IP(src="8.8.8.8") / UDP() / Raw(\'\\x00\' * 64)',
+ 'Ether(src="90:61:ae:fd:41:43") / IP(src="132.177.0.99") / UDP() / Raw(\'\\x00\' * 64)',
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_mac_dst(ActionFlowItem):
+ type = FlowActionType.SET_MAC_DST
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1"
+ " / udp / end actions set_mac_dst mac_addr 10:20:30:40:50:60 / end",
+ frozenset(
+ {
+ 'Ether(dst="90:61:ae:fd:41:43") / IP(src="192.168.0.1") '
+ "/ UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ 'Ether(dst="90:61:ae:fd:41:43") / IP(src="192.168.0.2") / UDP() / Raw(\'\\x00\' * 64)',
+ 'Ether(dst="90:61:ae:fd:41:43") / IP(src="10.0.30.99") / UDP() / Raw(\'\x00\' * 64)',
+ 'Ether(dst="90:61:ae:fd:41:43") / IP(src="8.8.8.8") / UDP() / Raw(\'\x00\' * 64)',
+ 'Ether(dst="90:61:ae:fd:41:43") / IP(src="132.177.0.99") '
+ "/ UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionInc_tcp_seq(ActionFlowItem):
+ type = FlowActionType.INC_TCP_SEQ
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 / tcp / end actions inc_tcp_seq / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / TCP(seq=2) / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / TCP(seq=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / TCP(seq=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / TCP(seq=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / TCP(seq=2) / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionDec_tcp_seq(ActionFlowItem):
+ type = FlowActionType.DEC_TCP_SEQ
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 / tcp / end actions dec_tcp_seq / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / TCP(seq=2) / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / TCP(seq=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / TCP(seq=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / TCP(seq=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / TCP(seq=2) / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionInc_tcp_ack(ActionFlowItem):
+ type = FlowActionType.INC_TCP_ACK
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 / tcp / end actions inc_tcp_ack / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / TCP(ack=2) / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / TCP(ack=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / TCP(ack=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / TCP(ack=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / TCP(ack=2) / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionDec_tcp_ack(ActionFlowItem):
+ type = FlowActionType.DEC_TCP_ACK
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 / tcp / end actions dec_tcp_ack / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / TCP(ack=2) / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / TCP(ack=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / TCP(ack=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / TCP(ack=2) / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / TCP(ack=2) / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_tag(ActionFlowItem):
+ type = FlowActionType.SET_TAG
+
+ test_case = {
+ "test_data": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions set_tag data 0xabc / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ # bit-mask applies to "data"
+ "test_mask": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions set_tag data 0xabc mask 0xcba / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ "test_index": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions set_tag data 0xabc index 1 / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_meta(ActionFlowItem):
+ type = FlowActionType.SET_META
+
+ test_case = {
+ "test_data": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions set_meta data 0xabc / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ # bit-mask applies to "data"
+ "test_mask": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions set_meta data 0xabc mask 0xcb / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_ipv4_dscp(ActionFlowItem):
+ type = FlowActionType.SET_IPV4_DSCP
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions set_ipv4_dscp dscp 2 / end",
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.1\", tos = 0) / UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\", tos = 0) / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\", tos = 0) / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\", tos = 0) / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\", tos = 0) / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionSet_ipv6_dscp(ActionFlowItem):
+ type = FlowActionType.SET_IPV6_DSCP
+
+ test_case = {
+ "test": (
+ "ingress pattern eth / ipv6 src is 2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c2 "
+ "/ udp / end actions set_ipv6_dscp dscp 0x30",
+ frozenset(
+ {
+ 'Ether() / IPv6(src="2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c2", tc = 0) '
+ "/ UDP() / Raw('\\x00' * 64)"
+ }
+ ),
+ frozenset(
+ {
+ 'Ether() / IPv6(src="2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c3", tc = 0) '
+ "/ UDP() / Raw('\\x00' * 64)",
+ 'Ether() / IPv6(src="2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c4", tc = 0) '
+ "/ UDP() / Raw('\x00' * 64)",
+ 'Ether() / IPv6(src="2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c5", tc = 0) '
+ "/ UDP() / Raw('\x00' * 64)",
+ 'Ether() / IPv6(src="2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c6", tc = 0) '
+ "/ UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowActionAge(ActionFlowItem):
+ type = FlowActionType.AGE
+
+ test_case = {
+ "test_timeout": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions age timeout 128 / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ # 8 bits reserved, must be zero
+ "test_reserved": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions age timeout 128 reserved 0 / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ # The user flow context, NULL means the rte_flow pointer.
+ "test_context": (
+ "ingress pattern eth / ipv4 src is 192.168.0.1 "
+ "/ udp / end actions age timeout 128 context NULL / end",
+ frozenset(
+ {"Ether() / IP(src=\"192.168.0.1\") / UDP() / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / IP(src=\"192.168.0.2\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"10.0.30.99\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"8.8.8.8\") / UDP() / Raw('\\x00' * 64)",
+ "Ether() / IP(src=\"132.177.0.99\") / UDP() / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+# Maps every FlowActionType enum member to the ActionFlowItem subclass that
+# carries its test cases, so the test harness can look classes up by type.
+# Insertion order is preserved (dict), so do not reorder entries casually.
+# NOTE(review): the values are the classes themselves, not instances, so
+# the annotation arguably should be Dict[FlowActionType, Type[ActionFlowItem]];
+# left unchanged because typing.Type may not be imported in this module —
+# confirm against the file's import block.
+ACTION_ITEMS_TYPE_CLASS_MAPPING: Dict[FlowActionType, ActionFlowItem] = {
+    FlowActionType.PASSTHRU: FlowActionPassthru,
+    FlowActionType.FLAG: FlowActionFlag,
+    FlowActionType.DROP: FlowActionDrop,
+    FlowActionType.COUNT: FlowActionCount,
+    FlowActionType.MAC_SWAP: FlowActionMac_swap,
+    FlowActionType.DEC_TTL: FlowActionDec_ttl,
+    FlowActionType.JUMP: FlowActionJump,
+    FlowActionType.MARK: FlowActionMark,
+    FlowActionType.QUEUE: FlowActionQueue,
+    FlowActionType.RSS: FlowActionRss,
+    FlowActionType.PF: FlowActionPf,
+    FlowActionType.VF: FlowActionVf,
+    FlowActionType.PHY_PORT: FlowActionPhy_port,
+    FlowActionType.PORT_ID: FlowActionPort_id,
+    FlowActionType.METER: FlowActionMeter,
+    FlowActionType.SECURITY: FlowActionSecurity,
+    FlowActionType.OF_SET_MPLS_TTL: FlowActionOf_set_mpls_ttl,
+    FlowActionType.OF_DEC_MPLS_TTL: FlowActionOf_dec_mpls_ttl,
+    FlowActionType.OF_SET_NW_TTL: FlowActionOf_set_nw_ttl,
+    FlowActionType.OF_DEC_NW_TTL: FlowActionOf_dec_nw_ttl,
+    FlowActionType.OF_COPY_TTL_OUT: FlowActionOf_copy_ttl_out,
+    FlowActionType.OF_COPY_TTL_IN: FlowActionOf_copy_ttl_in,
+    FlowActionType.OF_POP_VLAN: FlowActionOf_pop_vlan,
+    FlowActionType.OF_PUSH_VLAN: FlowActionOf_push_vlan,
+    FlowActionType.OF_SET_VLAN_VID: FlowActionOf_set_vlan_vid,
+    FlowActionType.OF_SET_VLAN_PCP: FlowActionOf_set_vlan_pcp,
+    FlowActionType.OF_POP_MPLS: FlowActionOf_pop_mpls,
+    FlowActionType.OF_PUSH_MPLS: FlowActionOf_push_mpls,
+    FlowActionType.VXLAN_ENCAP: FlowActionVxlan_encap,
+    FlowActionType.VXLAN_DECAP: FlowActionVxlan_decap,
+    FlowActionType.NVGRE_ENCAP: FlowActionNvgre_encap,
+    FlowActionType.NVGRE_DECAP: FlowActionNvgre_decap,
+    FlowActionType.RAW_ENCAP: FlowActionRaw_encap,
+    FlowActionType.RAW_DECAP: FlowActionRaw_decap,
+    FlowActionType.SET_IPV4_SRC: FlowActionSet_ipv4_src,
+    FlowActionType.SET_IPV4_DST: FlowActionSet_ipv4_dst,
+    FlowActionType.SET_IPV6_SRC: FlowActionSet_ipv6_src,
+    FlowActionType.SET_IPV6_DST: FlowActionSet_ipv6_dst,
+    FlowActionType.SET_TP_SRC: FlowActionSet_tp_src,
+    FlowActionType.SET_TP_DST: FlowActionSet_tp_dst,
+    FlowActionType.SET_TTL: FlowActionSet_ttl,
+    FlowActionType.SET_MAC_SRC: FlowActionSet_mac_src,
+    FlowActionType.SET_MAC_DST: FlowActionSet_mac_dst,
+    FlowActionType.INC_TCP_SEQ: FlowActionInc_tcp_seq,
+    FlowActionType.DEC_TCP_SEQ: FlowActionDec_tcp_seq,
+    FlowActionType.INC_TCP_ACK: FlowActionInc_tcp_ack,
+    FlowActionType.DEC_TCP_ACK: FlowActionDec_tcp_ack,
+    FlowActionType.SET_TAG: FlowActionSet_tag,
+    FlowActionType.SET_META: FlowActionSet_meta,
+    FlowActionType.SET_IPV4_DSCP: FlowActionSet_ipv4_dscp,
+    FlowActionType.SET_IPV6_DSCP: FlowActionSet_ipv6_dscp,
+    FlowActionType.AGE: FlowActionAge,
+}
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 20/23] dts: merge DTS framework/flow/flow_items.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (18 preceding siblings ...)
2022-04-06 15:18 ` [RFC PATCH v1 19/23] dts: merge DTS framework/flow/flow_action_items.py " Juraj Linkeš
@ 2022-04-06 15:19 ` Juraj Linkeš
2022-04-06 15:19 ` [RFC PATCH v1 21/23] dts: merge DTS framework/flow/flow_pattern_items.py " Juraj Linkeš
` (2 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:19 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/flow/flow_items.py | 128 +++++++++++++++++++++++++++++++
1 file changed, 128 insertions(+)
create mode 100644 dts/framework/flow/flow_items.py
diff --git a/dts/framework/flow/flow_items.py b/dts/framework/flow/flow_items.py
new file mode 100644
index 0000000000..e43614c587
--- /dev/null
+++ b/dts/framework/flow/flow_items.py
@@ -0,0 +1,128 @@
+# BSD LICENSE
+#
+# Copyright(c) 2020 Intel Corporation. All rights reserved.
+# Copyright © 2018[, 2019] The University of New Hampshire. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import annotations
+
+import copy
+import itertools
+from functools import reduce
+from typing import Any, Dict, FrozenSet, Hashable, Iterable, Set, Tuple, Union
+
+from .enums import FlowActionType, FlowItemType
+from .exceptions import InvalidFlowItemException
+
+# NOTE(review): the name suggests item types that are valid both as pattern
+# items and as action items — confirm at the use sites (not visible in this
+# file).
+PATTERN_ACTION_ITEMS = {
+    FlowItemType.INVERT,
+    FlowItemType.VOID,
+    FlowItemType.MARK,
+    FlowItemType.META,
+}
+
+
+class FlowItem(object):
+ type: Union[FlowItemType, FlowActionType]
+ # Defines what items this may not appear with
+ allowed_with: FrozenSet[Union[FlowItemType, FlowActionType]]
+ # OSI Model layer of the protocol
+ # This should be the lowest layer a protocol is used in, for example
+ # QUIC would be considered L5 since it needs to go after UDP (L4),
+ # even though it has capabilities in L6.
+ layer: int
+ valid_next_items: FrozenSet[Union[FlowItemType, FlowActionType]]
+
+ # Types subject to change, should only be accessed through
+ possible_properties: Dict[str, Tuple[str, FrozenSet[str], FrozenSet[str]]]
+ properties: str
+
+ def get_property_stream(
+ self,
+ ) -> Iterable[Tuple[FlowItem, FrozenSet[str], FrozenSet[str], str]]:
+ """
+ This function will return a generator that will provide all
+ configured property combinations.
+
+ This function will not mutate the instance it is called on.
+
+ @return: a generator that will provide all
+ permutations of possible properties this object has as a flow
+ item with properties
+ """
+ base_copy = copy.deepcopy(self)
+ for key, value in self.possible_properties.items():
+ new_copy = copy.deepcopy(base_copy)
+ new_copy.properties = value[0] # The properties string
+ yield new_copy, *value[1:], f"{self.type.value}_{key}"
+
+ def __init__(self):
+ self.properties = ""
+
+ def __truediv__(self, other: FlowItem):
+ """
+ Used in a similar way to scapy's packet composition.
+ @param other: The other flow item.
+ @return: A Flow containing both items
+ """
+ if type(self) != type(other):
+ raise InvalidFlowItemException(self, other)
+ elif other.type in self.valid_next_items:
+ # These imports are in here so there is no circular import
+ from framework.flow.flow_action_items import ActionFlowItem
+ from framework.flow.flow_pattern_items import PatternFlowItem
+
+ from .flow import Flow
+
+ if isinstance(self, PatternFlowItem):
+ return Flow(pattern_items=[self, other])
+ elif isinstance(self, ActionFlowItem):
+ return Flow(action_items=[self, other])
+ else:
+ raise TypeError(
+ f"{type(self):s} is not one of {PatternFlowItem:s}, {ActionFlowItem:s}."
+ )
+ else:
+ raise InvalidFlowItemException(self, other)
+
+ def __eq__(self, other) -> bool:
+ return (
+ type(self) == type(other)
+ and self.type == other.type
+ and self.properties == other.properties
+ )
+
+ def __str__(self):
+ if self.properties != "":
+ return self.properties
+ else:
+ return self.type.value
+
+ def __repr__(self):
+ return str(self)
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 21/23] dts: merge DTS framework/flow/flow_pattern_items.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (19 preceding siblings ...)
2022-04-06 15:19 ` [RFC PATCH v1 20/23] dts: merge DTS framework/flow/flow_items.py " Juraj Linkeš
@ 2022-04-06 15:19 ` Juraj Linkeš
2022-04-06 15:19 ` [RFC PATCH v1 22/23] dts: merge DTS framework/flow/flow_rule.py " Juraj Linkeš
2022-04-06 15:19 ` [RFC PATCH v1 23/23] dts: merge DTS framework/flow/generator.py " Juraj Linkeš
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:19 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/flow/flow_pattern_items.py | 1219 ++++++++++++++++++++++
1 file changed, 1219 insertions(+)
create mode 100644 dts/framework/flow/flow_pattern_items.py
diff --git a/dts/framework/flow/flow_pattern_items.py b/dts/framework/flow/flow_pattern_items.py
new file mode 100644
index 0000000000..ccb019e765
--- /dev/null
+++ b/dts/framework/flow/flow_pattern_items.py
@@ -0,0 +1,1219 @@
+# BSD LICENSE
+#
+# Copyright(c) 2020 Intel Corporation. All rights reserved.
+# Copyright © 2018[, 2019] The University of New Hampshire. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Allows the type system to handle referencing a class inside its definition
+from typing import Dict, FrozenSet, Iterable, List, Tuple
+
+from scapy.layers.inet import ICMP, IP, TCP, UDP
+from scapy.layers.inet6 import IPv6
+from scapy.layers.l2 import ARP, GRE, Dot1Q, Ether
+from scapy.layers.sctp import SCTP
+from scapy.layers.vxlan import VXLAN
+from scapy.packet import Packet
+
+from .enums import FlowItemType
+from .exceptions import InvalidFlowItemException
+from .flow_items import FlowItem
+
+# Included in every protocol item's set of valid successors below.
+ALWAYS_ALLOWED_ITEMS = {FlowItemType.RAW, FlowItemType.VOID}
+# Network-layer (OSI L3) protocol items.
+L3_FLOW_TYPES = {FlowItemType.IPV4, FlowItemType.IPV6}
+# Transport-layer items (GRE is grouped here because it follows L3 headers).
+L4_FLOW_ITEMS = {
+    FlowItemType.UDP,
+    FlowItemType.TCP,
+    FlowItemType.SCTP,
+    FlowItemType.GRE,
+}
+
+# Items that operate on the pattern itself rather than matching a
+# protocol header.
+PATTERN_OPERATION_TYPES = {
+    FlowItemType.MARK,
+    FlowItemType.META,
+    FlowItemType.TAG,
+    FlowItemType.FUZZY,
+    FlowItemType.INVERT,
+}
+
+# Encapsulation / tunneling protocol items.
+TUNNELING_PROTOCOL_TYPES = {
+    FlowItemType.VLAN,
+    FlowItemType.VXLAN,
+    FlowItemType.GRE,
+    FlowItemType.VXLAN_GPE,
+}
+
+
+class PatternFlowItem(FlowItem):
+    """
+    Base class for items that appear in an rte_flow pattern.
+
+    The class-level defaults allow any item type in any position;
+    protocol subclasses narrow these sets.
+    """
+
+    allowed_with: FrozenSet[FlowItemType] = frozenset({item for item in FlowItemType})
+
+    valid_next_items: List[FlowItemType] = [item for item in FlowItemType]
+
+    # Only used for building a tree upward
+    valid_parent_items: List[FlowItemType] = [item for item in FlowItemType]
+
+    # Maps property name -> (testpmd match expression, packet set,
+    # packet set); annotation corrected from List[...] — the value
+    # assigned is a dict.
+    possible_properties: Dict[str, Tuple[str, Iterable, Iterable]] = {}
+
+    def __truediv__(self, other: FlowItem):
+        """
+        Used in a similar way to scapy's packet composition.
+        @param other: The other flow item.
+        @return: A Flow containing both items
+        """
+        # END is accepted as a successor regardless of valid_next_items.
+        if other.type in self.valid_next_items or other.type == FlowItemType.END:
+            # This import is in here so there is no circular import
+            from .flow import Flow
+
+            return Flow(pattern_items=[self, other])
+        else:
+            raise InvalidFlowItemException(self, other)
+
+    # def to_scapy_packet(self):
+    #     scapy_class: type = ITEM_TYPE_SCAPY_CLASS_MAPPING[self.type]
+
+
+class FlowItemEnd(PatternFlowItem):
+ type = FlowItemType.END
+ valid_next_items = list({})
+
+
+class FlowItemVoid(PatternFlowItem):
+    # No-op item; keeps the permissive defaults from PatternFlowItem.
+    type = FlowItemType.VOID
+
+
+class FlowItemInvert(PatternFlowItem):
+    # Pattern-operation item; keeps the permissive defaults from
+    # PatternFlowItem.
+    type = FlowItemType.INVERT
+
+
+class FlowItemAny(PatternFlowItem):
+    # Wildcard item; keeps the permissive defaults from PatternFlowItem.
+    type = FlowItemType.ANY
+
+
+class FlowItemRaw(PatternFlowItem):
+    # Raw byte-match item; keeps the permissive defaults from
+    # PatternFlowItem.
+    type = FlowItemType.RAW
+
+
+class FlowItemArp_eth_ipv4(PatternFlowItem):
+ type = FlowItemType.ARP_ETH_IPV4
+ valid_next_items = list({FlowItemType.RAW, FlowItemType.VOID})
+ valid_parent_items: List[FlowItemType] = [FlowItemType.IPV4]
+ """
+ - ``hdr``: hardware type, normally 1. => hwtype
+ - ``pro``: protocol type, normally 0x0800. => ptype = 2048
+ - ``hln``: hardware address length, normally 6. => hwlen
+ - ``pln``: protocol address length, normally 4. => plen
+ - ``op``: opcode (1 for request, 2 for reply). => op
+ - ``sha``: sender hardware address. => hwsrc
+ - ``spa``: sender IPv4 address => psrc
+ - ``tha``: target hardware address. => hwdst
+ - ``tpa``: target IPv4 address. => pdst
+ - Default ``mask`` matches SHA, SPA, THA and TPA.
+ """
+ possible_properties = {
+ # THE FOLLOWING PROPERTIES ARE UNSUPPORTED BY TESTPMD AT THE TIME OF WRITING.
+ # THEY CAN BE ENABLED ONCE TESTPMD SUPPORTS THEM
+ # 'hdr':
+ # ('arp_eth_ipv4 hdr is 1',
+ # frozenset({"Ether() / ARP(hwtype=1) / Raw('\\x00' * 64)"}),
+ #
+ # frozenset({"Ether() / ARP(hwtype=2) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(hwtype=3) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(hwtype=6) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(hwtype-15) / Raw('\\x00' * 64)"
+ # })),
+ # 'pro':
+ # ('arp_eth_ipv4 pro is 0x0800',
+ # frozenset({"Ether() / ARP(ptype=0x0800) / Raw('\\x00' * 64)"}),
+ #
+ # frozenset({"Ether() / ARP(ptype=0x0800) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(ptype=0x0842) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(ptype=0x6004) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(ptype=0x809b) / Raw('\\x00' * 64)"
+ # })),
+ #
+ # 'hln':
+ # ('arp_eth_ipv4 hln is 6',
+ # frozenset({"Ether() / ARP(hwlen=6) / Raw('\\x00' * 64)"}),
+ #
+ # frozenset({"Ether() / ARP(hwlen=12) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(hwlen=2) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(hwlen=8) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(hwlen=4) / Raw('\\x00' * 64)"
+ # })),
+ #
+ # 'pln':
+ # ('arp_eth_ipv4 pln is 4',
+ # frozenset({"Ether() / ARP(plen=4) / Raw('\\x00' * 64)"}),
+ #
+ # frozenset({"Ether() / ARP(plen=6) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(plen=2) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(plen=8) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(plen=12) / Raw('\\x00' * 64)"
+ # })),
+ #
+ # 'op':
+ # ('arp_eth_ipv4 op is 1',
+ # frozenset({"Ether() / ARP(op=1) / Raw('\\x00' * 64)"}),
+ #
+ # frozenset({"Ether() / ARP(op=2) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(op=3) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(op=4) / Raw('\\x00' * 64)",
+ # "Ether() / ARP(op=5) / Raw('\\x00' * 64)"
+ # })),
+ # END UNSUPPORTED PROPERTIES
+ "sha": (
+ "arp_eth_ipv4 sha is 90:61:ae:fd:41:43",
+ frozenset(
+ {"Ether() / ARP(hwsrc=\"90:61:ae:fd:41:43\") / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / ARP(hwsrc=\"90:61:ae:fd:41:44\") / Raw('\\x00' * 64)",
+ "Ether() / ARP(hwsrc=\"90:61:ae:fd:41:45\") / Raw('\\x00' * 64)",
+ "Ether() / ARP(hwsrc=\"90:61:ae:fd:41:46\") / Raw('\\x00' * 64)",
+ "Ether() / ARP(hwsrc=\"90:61:ae:fd:41:47\") / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ "spa": (
+ "arp_eth_ipv4 spa is 192.168.0.80",
+ frozenset({"Ether() / ARP(psrc=\"192.168.0.80\") / Raw('\\x00' * 64)"}),
+ frozenset(
+ {
+ "Ether() / ARP(psrc=\"10.0.30.10\") / Raw('\\x00' * 64)",
+ "Ether() / ARP(psrc=\"8.8.8.8\") / Raw('\\x00' * 64)",
+ "Ether() / ARP(psrc=\"132.177.0.5\") / Raw('\\x00' * 64)",
+ "Ether() / ARP(psrc=\"123.4.5.6\") / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ "tha": (
+ "arp_eth_ipv4 tha is 00:00:00:00:00:00",
+ frozenset({"Ether() / ARP(hwdst=00:00:00:00:00:00) / Raw('\\x00' * 64)"}),
+ frozenset(
+ {
+ "Ether() / ARP(hwdst=90:61:ae:fd:41:45) / Raw('\\x00' * 64)",
+ "Ether() / ARP(hwdst=90:61:ae:fd:41:46) / Raw('\\x00' * 64)",
+ "Ether() / ARP(hwdst=90:61:ae:fd:41:47) / Raw('\\x00' * 64)",
+ "Ether() / ARP(hwdst=90:61:ae:fd:41:48) / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ "tpa": (
+ "arp_eth_ipv4 tpa is 192.168.0.1",
+ frozenset({"Ether() / ARP(pdst=192.168.0.1) / Raw('\\x00' * 64)"}),
+ frozenset(
+ {
+ "Ether() / ARP(pdst=10.0.30.10) / Raw('\\x00' * 64)",
+ "Ether() / ARP(pdst=8.8.8.8) / Raw('\\x00' * 64)",
+ "Ether() / ARP(pdst=132.177.0.5) / Raw('\\x00' * 64)",
+ "Ether() / ARP(pdst=123.4.5.6) / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowItemEth(PatternFlowItem):
+    type = FlowItemType.ETH
+    # An Ethernet header may be followed by an L3 header, a VLAN tag, or
+    # ARP (plus the always-allowed RAW/VOID items).
+    valid_next_items = list(
+        ALWAYS_ALLOWED_ITEMS
+        | L3_FLOW_TYPES
+        | {FlowItemType.VLAN, FlowItemType.ARP_ETH_IPV4}
+    )
+    # list({}) == []: ETH has no valid parent items.
+    valid_parent_items: List[FlowItemType] = list({})
+    # Matches an Ethernet header (not Ethernet frame).
+
+    """
+    - ``dst``: destination MAC.
+    - ``src``: source MAC.
+    - ``type``: EtherType or TPID. (TPID value is 0x8100, any others are normal EtherType)
+    - Default ``mask`` matches destination and source addresses only.
+    """
+    possible_properties = {
+        "dst": (
+            "eth dst is 90:61:ae:fd:41:43",
+            frozenset({"Ether(dst=\"90:61:ae:fd:41:43\") / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether(dst=\"90:61:ae:fd:41:44\") / Raw('\\x00' * 64)",
+                    "Ether(dst=\"90:61:ae:fd:41:45\") / Raw('\\x00' * 64)",
+                    "Ether(dst=\"90:61:ae:fd:41:46\") / Raw('\\x00' * 64)",
+                    "Ether(dst=\"91:61:ae:fd:41:43\") / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "src": (
+            "eth src is 90:61:ae:fd:41:43",
+            frozenset({"Ether(src=\"90:61:ae:fd:41:43\") / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether(src=\"90:61:ae:fd:41:44\") / Raw('\\x00' * 64)",
+                    "Ether(src=\"90:61:ae:fd:41:45\") / Raw('\\x00' * 64)",
+                    "Ether(src=\"90:61:ae:fd:41:46\") / Raw('\\x00' * 64)",
+                    "Ether(src=\"91:61:ae:fd:41:43\") / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "type": (
+            "eth type is 0x0800",  # IPv4 EtherType
+            frozenset({"Ether(type=0x0800) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether(type=0x0842) / Raw('\\x00' * 64)",
+                    "Ether(type=0x8100) / Raw('\\x00' * 64)",  # Possibly a special case? TPID/VLAN
+                    "Ether(type=0x9100) / Raw('\\x00' * 64)",  # Possibly special, VLAN double tagging
+                    "Ether(type=0x8863) / Raw('\\x00' * 64)",
+                    "Ether(type=0x9000) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+    }
+
+
+class FlowItemGre(PatternFlowItem):
+ type = FlowItemType.GRE
+ valid_next_items = list(L3_FLOW_TYPES | ALWAYS_ALLOWED_ITEMS)
+ valid_parent_items: List[FlowItemType] = [FlowItemType.IPV4, FlowItemType.IPV6]
+ """
+ - ``c_rsvd0_ver``: checksum, reserved 0 and version.
+ - ``protocol``: protocol type.
+ - Default ``mask`` matches protocol only.
+ """
+ possible_properties = {
+ "c_rsvd0_ver": (
+ "gre c_rsvd0_ver is 0",
+ frozenset(
+ {"Ether() / GRE(chksum_present=0, version=0) / Raw('\\x00' * 64)"}
+ ),
+ frozenset(
+ {
+ "Ether() / GRE(chksum_present=1, version=0)) / Raw('\\x00' * 64)",
+ # this is the only other option
+ }
+ ),
+ ),
+ "protocol": (
+ "gre protocol is 0x0800",
+ frozenset({"Ether() / GRE(proto=0x0800) / Raw('\\x00' * 64)"}),
+ frozenset(
+ {
+ "Ether() / GRE(proto=0x0842) / Raw('\\x00' * 64)",
+ "Ether() / GRE(proto=0x8100) / Raw('\\x00' * 64)",
+ "Ether() / GRE(proto=0x0806) / Raw('\\x00' * 64)",
+ "Ether() / GRE(proto=0x809B) / Raw('\\x00' * 64)",
+ }
+ ),
+ ),
+ }
+
+
+class FlowItemIcmp(PatternFlowItem):
+    # Matches an ICMP header; may only be followed by RAW/VOID and
+    # appears under IPv4.
+    type = FlowItemType.ICMP
+    valid_next_items = list({FlowItemType.RAW, FlowItemType.VOID})
+    valid_parent_items: List[FlowItemType] = [FlowItemType.IPV4]
+    """
+    - ``hdr``: ICMP header definition (``rte_icmp.h``).
+    This definition includes:
+    icmp_type (8 bits; for IPv4 echo request it's "8")
+    icmp_code (8 bits)
+    THE FOLLOWING ARE NOT SUPPORTED IN TESTPMD:
+    icmp_cksum (16 bits)
+    icmp_ident (16 bits)
+    icmp_seq_nb (16 bits)
+    - Default ``mask`` matches ICMP type and code only.
+    """
+    possible_properties = {
+        # THE FOLLOWING PROPERTIES ARE UNSUPPORTED BY TESTPMD AT THE TIME OF WRITING.
+        # THEY CAN BE ENABLED ONCE TESTPMD SUPPORTS THEM
+        # 'icmp_cksum':
+        #     ('icmp cksum is 0x0800',
+        #      frozenset({"Ether() / ICMP() / UDP() / Raw('\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / ICMP() / UDP() / Raw('\x00' * 64)",
+        #                 "Ether() / ICMP() / UDP() / Raw('\x00' * 64)",
+        #                 "Ether() / ICMP() / UDP() / Raw('\x00' * 64)",
+        #                 "Ether() / ICMP() / UDP() / Raw('\x00' * 64)"
+        #                 })),
+        # END UNSUPPORTED PROPERTIES
+        "icmp_type": (
+            "icmp type is 3",
+            frozenset({"Ether() / ICMP(type=3) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / ICMP(type=3) / Raw('\\x00' * 64)",
+                    "Ether() / ICMP(type=11) / Raw('\\x00' * 64)",
+                    "Ether() / ICMP(type=13) / Raw('\\x00' * 64)",
+                    "Ether() / ICMP(type=0) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "icmp_code": (
+            "icmp type is 3 code is 3",  # Assume type 3 code 3; code meanings/options are dependent on type.
+            frozenset({"Ether() / ICMP(type=3, code=3) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / ICMP(type=3, code=0) / Raw('\\x00' * 64)",
+                    "Ether() / ICMP(type=3, code=2) / Raw('\\x00' * 64)",
+                    "Ether() / ICMP(type=11, code=1) / Raw('\\x00' * 64)",
+                    "Ether() / ICMP(type=12, code=2) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        # NOTE(review): the two entries below look like unfinished
+        # placeholders: they use '\x00' (a literal NUL) where the rest of
+        # the file uses the escaped '\\x00', each frozenset collapses to a
+        # single identical packet string, and ICMP(proto=0x0800) is not a
+        # scapy ICMP field. Confirm intent before relying on them.
+        "icmp_ident": (
+            "icmp ident is 0x0800",
+            frozenset({"Ether() / ICMP() / UDP() / Raw('\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / ICMP() / UDP() / Raw('\x00' * 64)",
+                    "Ether() / ICMP() / UDP() / Raw('\x00' * 64)",
+                    "Ether() / ICMP() / UDP() / Raw('\x00' * 64)",
+                    "Ether() / ICMP() / UDP() / Raw('\x00' * 64)",
+                }
+            ),
+        ),
+        "icmp_seq": (
+            "icmp seq is 0x0800",
+            frozenset({"Ether() / ICMP(proto=0x0800) / UDP() / Raw('\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / ICMP() / UDP() / Raw('\x00' * 64)",
+                    "Ether() / ICMP() / UDP() / Raw('\x00' * 64)",
+                    "Ether() / ICMP() / UDP() / Raw('\x00' * 64)",
+                    "Ether() / ICMP() / UDP() / Raw('\x00' * 64)",
+                }
+            ),
+        ),
+    }
+
+
+class FlowItemIcmp6(PatternFlowItem):
+    # Matches an ICMPv6 header; may only be followed by RAW/VOID and
+    # appears under IPv6.
+    type = FlowItemType.ICMP6
+    valid_next_items = list({FlowItemType.RAW, FlowItemType.VOID})
+    valid_parent_items: List[FlowItemType] = [FlowItemType.IPV6]
+    """
+    - ``type``: ICMPv6 type.
+    - ``code``: ICMPv6 code.
+    - ``checksum``: ICMPv6 checksum.
+    - Default ``mask`` matches ``type`` and ``code``.
+    """
+    possible_properties = {
+        # THE FOLLOWING PROPERTIES ARE UNSUPPORTED BY TESTPMD AT THE TIME OF WRITING.
+        # THEY CAN BE ENABLED ONCE TESTPMD SUPPORTS THEM
+        # 'checksum':
+        #     ('icmp6 cksum is 0x1234',
+        #      frozenset({"Ether() / ICMPv6DestUnreach(cksum=0x1234) / Raw('\\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / ICMPv6DestUnreach(cksum=0x4321) / Raw('\\x00' * 64)",
+        #                 "Ether() / ICMPv6DestUnreach(cksum=0xffff) / Raw('\\x00' * 64)",
+        #                 "Ether() / ICMPv6DestUnreach(cksum=0x1233) / Raw('\\x00' * 64)",
+        #                 "Ether() / ICMPv6DestUnreach(cksum=0x1010) / Raw('\\x00' * 64)"
+        #                 })),
+        # END UNSUPPORTED PROPERTIES
+        "type": (
+            "icmp6 type is 1",  # Destination Unreachable
+            frozenset({"Ether() / ICMPv6DestUnreach(type=1) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / ICMPv6DestUnreach(type=128) / Raw('\\x00' * 64)",
+                    "Ether() / ICMPv6DestUnreach(type=129) / Raw('\\x00' * 64)",
+                    "Ether() / ICMPv6DestUnreach(type=3) / Raw('\\x00' * 64)",
+                    "Ether() / ICMPv6DestUnreach(type=135) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "code": (  # ICMP code is dependent on type; these are possible Destination Unreachable codes
+            "icmp6 code is 0",
+            frozenset({"Ether() / ICMPv6DestUnreach(code=0) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / ICMPv6DestUnreach(code=1) / Raw('\\x00' * 64)",
+                    "Ether() / ICMPv6DestUnreach(code=2) / Raw('\\x00' * 64)",
+                    "Ether() / ICMPv6DestUnreach(code=3) / Raw('\\x00' * 64)",
+                    "Ether() / ICMPv6DestUnreach(code=4) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+    }
+
+
+class FlowItemIpv4(PatternFlowItem):
+    # Matches an IPv4 header; may carry an L4 payload or ICMP, and
+    # appears under ETH or GRE.
+    type = FlowItemType.IPV4
+    valid_next_items = list(L4_FLOW_ITEMS | {FlowItemType.ICMP} | ALWAYS_ALLOWED_ITEMS)
+    valid_parent_items: List[FlowItemType] = [FlowItemType.ETH, FlowItemType.GRE]
+    """
+    Note: IPv4 options are handled by dedicated pattern items.
+
+    - ``hdr``: IPv4 header definition (``rte_ip.h``).
+    - Default ``mask`` matches source and destination addresses only.
+    """
+
+    possible_properties = {
+        "tos": (
+            "ipv4 tos is 0",
+            frozenset({"Ether() / IP(tos=0) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP(tos=2) / Raw('\\x00' * 64)",
+                    "Ether() / IP(tos=4) / Raw('\\x00' * 64)",
+                    "Ether() / IP(tos=8) / Raw('\\x00' * 64)",
+                    "Ether() / IP(tos=16) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "ttl": (
+            "ipv4 ttl is 64",
+            frozenset({"Ether() / IP(ttl=64) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP(ttl=128) / Raw('\\x00' * 64)",
+                    "Ether() / IP(ttl=255) / Raw('\\x00' * 64)",
+                    "Ether() / IP(ttl=32) / Raw('\\x00' * 64)",
+                    "Ether() / IP(ttl=100) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "proto": (
+            "ipv4 proto is 0x06",  # TCP
+            frozenset({"Ether() / IP(proto=0x06) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP(proto=0x01) / Raw('\\x00' * 64)",
+                    "Ether() / IP(proto=0x11) / Raw('\\x00' * 64)",
+                    "Ether() / IP(proto=0x12) / Raw('\\x00' * 64)",
+                    "Ether() / IP(proto=0x58) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "src": (
+            "ipv4 src is 192.168.0.5",
+            frozenset({"Ether() / IP(src=\"192.168.0.5\") / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP(src=\"10.10.10.10\") / Raw('\\x00' * 64)",
+                    "Ether() / IP(src=\"132.177.127.6\") / Raw('\\x00' * 64)",
+                    "Ether() / IP(src=\"192.168.0.4\") / Raw('\\x00' * 64)",
+                    "Ether() / IP(src=\"192.168.0.250\") / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "dst": (
+            "ipv4 dst is 192.168.0.5",
+            frozenset({"Ether() / IP(dst=\"192.168.0.5\") / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP(dst=\"10.10.10.10\") / Raw('\\x00' * 64)",
+                    "Ether() / IP(dst=\"132.177.127.6\") / Raw('\\x00' * 64)",
+                    "Ether() / IP(dst=\"192.168.0.4\") / Raw('\\x00' * 64)",
+                    "Ether() / IP(dst=\"192.168.0.250\") / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        # CHECKSUM PROPERTY NOT SUPPORTED BY TESTPMD; DO NOT UNCOMMENT UNTIL SUPPORTED
+        # 'checksum':
+        #     ('ipv4 chksum is 0x1234',
+        #      frozenset({"Ether() / ICMPv6DestUnreach(cksum=0x1234) / Raw('\\x00' * 64)"}),
+        #      frozenset({"Ether() / ICMPv6DestUnreach(cksum=0x4321) / Raw('\\x00' * 64)",
+        #                 "Ether() / ICMPv6DestUnreach(cksum=0xffff) / Raw('\\x00' * 64)",
+        #                 "Ether() / ICMPv6DestUnreach(cksum=0x1233) / Raw('\\x00' * 64)",
+        #                 "Ether() / ICMPv6DestUnreach(cksum=0x1010) / Raw('\\x00' * 64)"
+        #                 })),
+        ##########################################################################
+    }
+
+
+class FlowItemIpv6(PatternFlowItem):
+    # Matches an IPv6 header; may carry an L4 payload or ICMPv6, and
+    # appears under ETH or GRE.
+    type = FlowItemType.IPV6
+    valid_next_items = list(L4_FLOW_ITEMS | {FlowItemType.ICMP6} | ALWAYS_ALLOWED_ITEMS)
+    valid_parent_items: List[FlowItemType] = [FlowItemType.ETH, FlowItemType.GRE]
+    """
+    Note: IPv6 options are handled by dedicated pattern items, see `Item:
+    IPV6_EXT`_.
+
+    - ``hdr``: IPv6 header definition (``rte_ip.h``).
+    - Default ``mask`` matches source and destination addresses only.
+    """
+
+    possible_properties = {
+        # THE FOLLOWING PROPERTIES ARE UNSUPPORTED BY TESTPMD AT THE TIME OF WRITING.
+        # THEY CAN BE ENABLED ONCE TESTPMD SUPPORTS THEM
+        # 'vtc_flow':
+        #     ('ipv6 vtc_flow is 0x0',
+        #      frozenset({"Ether() / IPv6(tc=0, fl=0, version=0) / Raw('\\x00' * 64)"}),
+        #      frozenset({"Ether() / IPv6(tc=1, fl=0, version=0) / Raw('\\x00' * 64)",
+        #                 "Ether() / IPv6(tc=0, fl=0xABCD, version=0) / Raw('\\x00' * 64)",
+        #                 "Ether() / IPv6(tc=0, fl=0, version=1) / Raw('\\x00' * 64)",
+        #                 "Ether() / IPv6(tc=6, fl=0x9999, version=1) / Raw('\\x00' * 64)"
+        #                 })),
+        # 'payload_len':
+        #     ('ipv6 payload_len is 64',
+        #      frozenset({"Ether() / IPv6(plen=64) / Raw('\\x00' * 64)"}),
+        #      frozenset({"Ether() / IPv6(plen=32) / Raw('\\x00' * 64)",
+        #                 "Ether() / IPv6(plen=128) / Raw('\\x00' * 64)",
+        #                 "Ether() / IPv6(plen=5000) / Raw('\\x00' * 64)",
+        #                 "Ether() / IPv6(plen=4) / Raw('\\x00' * 64)"
+        #                 })),
+        # END UNSUPPORTED PROPERTIES
+        "tc": (
+            "ipv6 tc is 0",
+            frozenset({"Ether() / IPv6(tc=0) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IPv6(tc=1) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(tc=2) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(tc=4) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(tc=6) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "flow": (
+            "ipv6 flow is 0xABCD",
+            frozenset({"Ether() / IPv6(fl=0xABCD) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IPv6(fl=0xABCE) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(fl=0x0001) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(fl=0xFFFF) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(fl=0x1234) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "proto": (  # next header (nh)
+            "ipv6 proto is 6",  # TCP
+            frozenset({"Ether() / IPv6(nh=6) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IPv6(nh=17) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(nh=41) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(nh=0) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(nh=60) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "hop": (  # hop limit
+            "ipv6 hop is 64",
+            frozenset({"Ether() / IPv6(hlim=64) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IPv6(hlim=128) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(hlim=32) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(hlim=255) / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(hlim=100) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "dst": (
+            "ipv6 dst is 2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c2",
+            frozenset(
+                {
+                    "Ether() / IPv6(dst=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c2\") / Raw('\\x00' * 64)"
+                }
+            ),
+            frozenset(
+                {
+                    "Ether() / IPv6(dst=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c3\") / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(dst=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c4\") / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(dst=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c5\") / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(dst=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c6\") / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "src": (
+            "ipv6 src is 2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c2",
+            frozenset(
+                {
+                    "Ether() / IPv6(src=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c2\") / Raw('\\x00' * 64)"
+                }
+            ),
+            frozenset(
+                {
+                    "Ether() / IPv6(src=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c3\") / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(src=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c4\") / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(src=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c5\") / Raw('\\x00' * 64)",
+                    "Ether() / IPv6(src=\"2001:0000:9d38:6ab8:1c48:3a1c:a95a:b1c6\") / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+    }
+
+
+class FlowItemSctp(PatternFlowItem):
+    # Matches an SCTP header; may only be followed by RAW/VOID and
+    # appears under IPv4/IPv6.
+    type = FlowItemType.SCTP
+    valid_next_items = list(ALWAYS_ALLOWED_ITEMS)
+    valid_parent_items: List[FlowItemType] = [FlowItemType.IPV4, FlowItemType.IPV6]
+    """
+
+    **chunks?
+    - ``hdr``: SCTP header definition (``rte_sctp.h``).
+    - Default ``mask`` matches source and destination ports only.
+    """
+    possible_properties = {
+        "src": (
+            "sctp src is 3838",
+            frozenset({"Ether() / IP() / SCTP(sport=3838) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP() / SCTP(sport=3939) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(sport=5000) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(sport=1998) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(sport=1028) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "dst": (
+            "sctp dst is 3838",
+            frozenset({"Ether() / IP() / SCTP(dport=3838) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP() / SCTP(dport=3939) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(dport=5000) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(dport=1998) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(dport=1028) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "tag": (
+            "sctp tag is 12345",
+            frozenset({"Ether() / IP() / SCTP(tag=12345) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP() / SCTP(tag=12346) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(tag=12) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(tag=9999) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(tag=42) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "cksum": (
+            "sctp cksum is 0x01535b67",
+            frozenset({"Ether() / IP() / SCTP(chksum=0x01535b67) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP() / SCTP(chksum=0x01535b68) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(chksum=0xdeadbeef) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(chksum=0x12345678) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / SCTP(chksum=0x385030fe) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+    }
+
+
+class FlowItemTcp(PatternFlowItem):
+    # Matches a TCP header; may only be followed by RAW/VOID and
+    # appears under IPv4/IPv6.
+    type = FlowItemType.TCP
+    valid_next_items = list(ALWAYS_ALLOWED_ITEMS)
+    valid_parent_items: List[FlowItemType] = [FlowItemType.IPV4, FlowItemType.IPV6]
+    """
+    - ``hdr``: TCP header definition (``rte_tcp.h``).
+    - Default ``mask`` matches source and destination ports only.
+
+    #define RTE_TCP_CWR_FLAG 0x80
+
+    #define RTE_TCP_ECE_FLAG 0x40
+
+    #define RTE_TCP_URG_FLAG 0x20
+
+    #define RTE_TCP_ACK_FLAG 0x10
+
+    #define RTE_TCP_PSH_FLAG 0x08
+
+    #define RTE_TCP_RST_FLAG 0x04
+
+    #define RTE_TCP_SYN_FLAG 0x02
+
+    #define RTE_TCP_FIN_FLAG 0x01
+
+    Can we set multiple flags at once in testing (ex. SYN, ACK)?
+    Probably, and we can definitely test them if necessary.
+    """
+    possible_properties = {
+        # THE FOLLOWING PROPERTIES ARE UNSUPPORTED BY TESTPMD AT THE TIME OF WRITING.
+        # THEY CAN BE ENABLED ONCE TESTPMD SUPPORTS THEM
+        # 'data_off':
+        #     ('tcp data_off is 0',
+        #      frozenset({"Ether() / IP() / TCP(dataofs=0) / Raw('\\x00' * 64)"}),
+        #      frozenset({"Ether() / IP() / TCP(dataofs=1) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / TCP(dataofs=2) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / TCP(dataofs=3) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / TCP(dataofs=4) / Raw('\\x00' * 64)"
+        #                 })),
+        # 'rx_win':
+        #     ('tcp rx_win is 64',
+        #      frozenset({"Ether() / IP() / TCP(window=64)/ Raw('\\x00' * 64)"}),
+        #      frozenset({"Ether() / IP() / TCP(window=16)/ Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / TCP(window=128) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / TCP(window=32) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / TCP(window=255) / Raw('\\x00' * 64)"
+        #                 })),
+        # 'cksum':
+        #     ('tcp cksum is 0x1234',
+        #      frozenset({"Ether() / IP() / TCP(chksum=0x1234) / Raw('\\x00' * 64)"}),
+        #      frozenset({"Ether() / IP() / TCP(chksum=0x4321) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / TCP(chksum=0xffff) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / TCP(chksum=0x9999) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / TCP(chksum=0x1233) / Raw('\\x00' * 64)"
+        #                 })),
+        # END UNSUPPORTED PROPERTIES
+        "src": (
+            "tcp src is 3838",
+            frozenset({"Ether() / IP() / TCP(sport=3838) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP() / TCP(sport=3939) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / TCP(sport=5000) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / TCP(sport=1998) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / TCP(sport=1028) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "dst": (
+            "tcp dst is 3838",
+            frozenset({"Ether() / IP() / TCP(dport=3838) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP() / TCP(dport=3939) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / TCP(dport=5000) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / TCP(dport=1998) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / TCP(dport=1028) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "flags": (
+            "tcp flags is 0x02",
+            frozenset({"Ether() / IP() / TCP(flags=0x02) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP() / TCP(flags=0x01) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / TCP(flags=0x04) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / TCP(flags=0x08) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / TCP(flags=0x10) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+    }
+
+
+class FlowItemUdp(PatternFlowItem):
+    # Pattern item matching a UDP header.
+    type = FlowItemType.UDP
+    # UDP may be followed by the VXLAN tunnel items in addition to the
+    # items that are always allowed after any protocol.
+    valid_next_items = list(
+        {FlowItemType.VXLAN, FlowItemType.VXLAN_GPE} | ALWAYS_ALLOWED_ITEMS
+    )
+    # UDP is only valid directly on top of an L3 (IPv4/IPv6) header.
+    valid_parent_items: List[FlowItemType] = [FlowItemType.IPV4, FlowItemType.IPV6]
+    # NOTE(review): the string below follows the attribute assignments, so it
+    # is a bare string statement rather than the class docstring.
+    """
+    - ``hdr``: UDP header definition (``rte_udp.h``).
+    - Default ``mask`` matches source and destination ports only.
+    """
+
+    # Maps property name -> (testpmd match expression,
+    #                        scapy packets that should match,
+    #                        scapy packets that should not match).
+    # Structure inferred from the values below — confirm against the consumer.
+    possible_properties = {
+        # THE FOLLOWING PROPERTIES ARE UNSUPPORTED BY TESTPMD AT THE TIME OF WRITING.
+        # THEY MAY BE RE-ENABLED ONCE TESTPMD SUPPORTS THEIR USE
+        # 'dgram_len':
+        #     ('udp dgram_len is 64',
+        #      frozenset({"Ether() / IP() / UDP(len=64) / Raw('\\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / IP() / UDP(len=128) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / UDP(len=32) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / UDP(len=16) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / UDP(len=255) / Raw('\\x00' * 64)"
+        #                 })),
+        # 'dgram_cksum':
+        #     ('udp dgram_cksum is 0x1234',
+        #      frozenset({"Ether() / IP() / UDP(chksum=0x1234) / Raw('\\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / IP() / UDP(chksum=0x4321) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / UDP(chksum=0xffff) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / UDP(chksum=0x9999) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / UDP(chksum=0x1233) / Raw('\\x00' * 64)"
+        #                 })),
+        # END UNSUPPORTED PROPERTIES
+        "src": (
+            "udp src is 3838",
+            frozenset({"Ether() / IP() / UDP(sport=3838) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP() / UDP(sport=3939) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / UDP(sport=5000) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / UDP(sport=1998) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / UDP(sport=1028) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "dst": (
+            "udp dst is 3838",
+            frozenset({"Ether() / IP() / UDP(dport=3838) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP() / UDP(dport=3939) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / UDP(dport=5000) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / UDP(dport=1998) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / UDP(dport=1028) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+    }
+
+
+class FlowItemVlan(PatternFlowItem):
+    # Pattern item matching an 802.1Q VLAN tag.
+    type = FlowItemType.VLAN
+    valid_next_items = list(ALWAYS_ALLOWED_ITEMS)
+    # A VLAN tag sits directly after the Ethernet header.
+    valid_parent_items: List[FlowItemType] = [FlowItemType.ETH]
+    """
+    The corresponding standard outer EtherType (TPID) values are
+    ``RTE_ETHER_TYPE_VLAN`` or ``RTE_ETHER_TYPE_QINQ``. It can be overridden by the
+    preceding pattern item.
+    If a ``VLAN`` item is present in the pattern, then only tagged packets will
+    match the pattern.
+
+    - ``tci``: tag control information.
+    - ``inner_type``: inner EtherType or TPID.
+    - Default ``mask`` matches the VID part of TCI only (lower 12 bits).
+
+    tci in testpmd = pcp, dei, and vid, altogether.
+
+    pcp in testpmd = prio in scapy
+    dei in testpmd = id in scapy?
+    vid in testpmd = vlan in scapy
+
+    tpid in testpmd = type in scapy
+    """
+    # Maps property name -> (testpmd match expression, matching scapy packets,
+    # non-matching scapy packets) — structure inferred from the values below.
+    possible_properties = {
+        # THE FOLLOWING PROPERTIES ARE UNSUPPORTED BY TESTPMD AT THE TIME OF WRITING.
+        # THEY MAY BE RE-ENABLED ONCE TESTPMD SUPPORTS THEIR USE
+        # 'tpid':
+        #     ('vlan tpid is 0x8100',  # standard value
+        #      frozenset({"Ether() / Dot1Q(type=0x8100) / Raw('\\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / Dot1Q(type=0x0800) / Raw('\\x00' * 64)",
+        #                 "Ether() / Dot1Q(type=0x0842) / Raw('\\x00' * 64)",
+        #                 "Ether() / Dot1Q(type=0x809b) / Raw('\\x00' * 64)",
+        #                 "Ether() / Dot1Q(type=0x86dd) / Raw('\\x00' * 64)"
+        #                 })),
+        # END UNSUPPORTED PROPERTIES
+        # TCI 0xaaaa decomposes into prio 0x5 (top 3 bits), dei 0 (next bit)
+        # and vid 0xaaa (lower 12 bits) — matching the positive packet below.
+        "tci": (
+            "vlan tci is 0xaaaa",
+            frozenset(
+                {
+                    "Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xaaa) / Raw('\\x00' * 64)"
+                }
+            ),
+            frozenset(
+                {
+                    "Ether() / Dot1Q(prio = 0x0, id = 0x1, vlan = 0xbbb) / Raw('\\x00' * 64)",
+                    "Ether() / Dot1Q(prio = 0x5, id = 0x0, vlan = 0xccc) / Raw('\\x00' * 64)",
+                    "Ether() / Dot1Q(prio = 0x5, id = 0x1, vlan = 0xaaa) / Raw('\\x00' * 64)",
+                    "Ether() / Dot1Q(prio = 0x4, id = 0x0, vlan = 0xaaa) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        "pcp": (
+            "vlan pcp is 0x0",
+            frozenset({"Ether() / Dot1Q(prio=0x0) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / Dot1Q(prio=0x1) / Raw('\\x00' * 64)",
+                    "Ether() / Dot1Q(prio=0x2) / Raw('\\x00' * 64)",
+                    "Ether() / Dot1Q(prio=0x3) / Raw('\\x00' * 64)",
+                    "Ether() / Dot1Q(prio=0x7) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+        # dei is a single bit, so there is only one non-matching packet.
+        "dei": (
+            "vlan dei is 0",
+            frozenset({"Ether() / Dot1Q(id=0) / Raw('\\x00' * 64)"}),
+            frozenset({"Ether() / Dot1Q(id=1) / Raw('\\x00' * 64)"}),
+        ),
+        "vid": (
+            "vlan vid is 0xabc",
+            frozenset({"Ether() / Dot1Q(vlan=0xabc) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / Dot1Q(vlan=0xaaa) / Raw('\\x00' * 64)",
+                    "Ether() / Dot1Q(vlan=0x123) / Raw('\\x00' * 64)",
+                    "Ether() / Dot1Q(vlan=0x1f5) / Raw('\\x00' * 64)",
+                    "Ether() / Dot1Q(vlan=0x999) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+    }
+
+
+class FlowItemVxlan(PatternFlowItem):
+    # Pattern item matching a VXLAN tunnel header.
+    type = FlowItemType.VXLAN
+    # NOTE(review): sibling items use list(...) / List[...] here; this class
+    # uses frozensets — confirm that every consumer accepts both forms.
+    valid_next_items = frozenset({FlowItemType.ETH} | ALWAYS_ALLOWED_ITEMS)
+    valid_parent_items: FrozenSet[FlowItemType] = frozenset({FlowItemType.UDP})
+    """
+    - ``flags``: normally 0x08 (I flag).
+    - ``rsvd0``: reserved, normally 0x000000.
+    - ``vni``: VXLAN network identifier.
+    - ``rsvd1``: reserved, normally 0x00.
+    - Default ``mask`` matches VNI only.
+
+    TESTPMD ONLY SUPPORTS VNI.
+    """
+
+    # Maps property name -> (testpmd match expression, matching scapy packets,
+    # non-matching scapy packets) — structure inferred from the values below.
+    possible_properties = {
+        # THE FOLLOWING PROPERTIES ARE UNSUPPORTED BY TESTPMD AT THE TIME OF WRITING.
+        # THEY CAN BE ENABLED ONCE TESTPMD SUPPORTS THEM
+        # 'rsvd0':
+        #     ('vxlan rsvd0 is 0x000000',
+        #      frozenset({"Ether() / IP() / VXLAN(reserved0=0) / Raw('\\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / IP() / VXLAN(reserved0=1) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=2) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=3) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=4) / Raw('\\x00' * 64)"
+        #                 })),
+        # 'rsvd1':
+        #     ('vxlan rsvd1 is 0x00',
+        #      frozenset({"Ether() / IP() / VXLAN(reserved0=0) / Raw('\\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / IP() / VXLAN(reserved0=1) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=2) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=3) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=4) / Raw('\\x00' * 64)"
+        #                 })),
+        # 'flags':
+        #     ('vxlan flags is 0x08',
+        #      frozenset({"Ether() / IP() / VXLAN(flags=0x08) / Raw('\\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / IP() / VXLAN(flags=0x80) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(flags=0x00) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(flags=0x99) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(flags=0x01) / Raw('\\x00' * 64)"
+        #                 })),
+        # END UNSUPPORTED PROPERTIES
+        "vni": (  # a 3-byte value
+            "vxlan vni is 0x112233",
+            frozenset({"Ether() / IP() / VXLAN(vni=0x112233) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP() / VXLAN(vni=0x112234) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / VXLAN(vni=0x123456) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / VXLAN(vni=0xaabbcc) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / VXLAN(vni=0x999999) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+    }
+
+
+class FlowItemVxlan_gpe(PatternFlowItem):
+    # Pattern item matching a VXLAN-GPE tunnel header.
+    type = FlowItemType.VXLAN_GPE
+    valid_next_items = list({FlowItemType.ETH} | ALWAYS_ALLOWED_ITEMS)
+    valid_parent_items: List[FlowItemType] = [FlowItemType.UDP]
+    """
+    - ``flags``: normally 0x0C (I and P flags).
+    - ``rsvd0``: reserved, normally 0x0000.
+    - ``protocol``: protocol type. => NextProtocol?
+    - ``vni``: VXLAN network identifier.
+    - ``rsvd1``: reserved, normally 0x00.
+    - Default ``mask`` matches VNI only.
+
+    NOT CURRENTLY SUPPORTED BY TESTPMD.
+    """
+
+    # Maps property name -> (testpmd match expression, matching scapy packets,
+    # non-matching scapy packets) — structure inferred from the values below.
+    possible_properties = {
+        # THE FOLLOWING PROPERTIES ARE UNSUPPORTED BY TESTPMD AT THE TIME OF WRITING.
+        # THEY CAN BE ENABLED ONCE TESTPMD SUPPORTS THEM
+        # 'rsvd0':
+        #     ('vxlan rsvd0 is 0x000000',
+        #      frozenset({"Ether() / IP() / VXLAN(reserved0=0) / Raw('\\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / IP() / VXLAN(reserved0=1) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=2) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=3) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=4) / Raw('\\x00' * 64)"
+        #                 })),
+        # 'rsvd1':
+        #     ('vxlan rsvd1 is 0x00',
+        #      frozenset({"Ether() / IP() / VXLAN(reserved0=0) / Raw('\\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / IP() / VXLAN(reserved0=1) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=2) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=3) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(reserved0=4) / Raw('\\x00' * 64)"
+        #                 })),
+        # 'flags':
+        #     ('vxlan flags is 0x08',
+        #      frozenset({"Ether() / IP() / VXLAN(flags=0x08) / Raw('\\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / IP() / VXLAN(flags=0x80) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(flags=0x00) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(flags=0x99) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(flags=0x01) / Raw('\\x00' * 64)"
+        #                 })),
+        # 'protocol':
+        #     ('vxlan protocol is 0x01',
+        #      frozenset({"Ether() / IP() / VXLAN(NextProtocol=0x01) / Raw('\\x00' * 64)"}),
+        #
+        #      frozenset({"Ether() / IP() / VXLAN(NextProtocol=0x01) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(NextProtocol=0x11) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(NextProtocol=0x22) / Raw('\\x00' * 64)",
+        #                 "Ether() / IP() / VXLAN(NextProtocol=0x33) / Raw('\\x00' * 64)"
+        #                 })),
+        # END UNSUPPORTED PROPERTIES
+        # NOTE(review): this entry duplicates FlowItemVxlan's 'vni' case — the
+        # testpmd expression says "vxlan", not "vxlan-gpe". Confirm intent.
+        "vni": (  # a 3-byte value
+            "vxlan vni is 0x112233",
+            frozenset({"Ether() / IP() / VXLAN(vni=0x112233) / Raw('\\x00' * 64)"}),
+            frozenset(
+                {
+                    "Ether() / IP() / VXLAN(vni=0x112234) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / VXLAN(vni=0x123456) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / VXLAN(vni=0xaabbcc) / Raw('\\x00' * 64)",
+                    "Ether() / IP() / VXLAN(vni=0x999999) / Raw('\\x00' * 64)",
+                }
+            ),
+        ),
+    }
+
+
+class FlowItemFuzzy(PatternFlowItem):
+    # Fuzzy match item: matches packets with a configurable threshold rather
+    # than an exact header comparison.
+    type = FlowItemType.FUZZY
+    layer = 1  # This field needs to go before ethernet, and we ignore layer 1 in these filters
+    valid_next_items = list({FlowItemType.ETH, FlowItemType.RAW, FlowItemType.VOID})
+    """
+    +----------+---------------+--------------------------------------------------+
+    | Field    | Subfield      | Value                                            |
+    +==========+===============+==================================================+
+    | ``spec`` | ``threshold`` | 0 as perfect match, 0xffffffff as fuzziest match |
+    +----------+---------------+--------------------------------------------------+
+    | ``last`` | ``threshold`` | upper range value                                |
+    +----------+---------------+--------------------------------------------------+
+    | ``mask`` | ``threshold`` | bit-mask apply to "spec" and "last"              |
+    +----------+---------------+--------------------------------------------------+
+    """
+
+
+class FlowItemMark(PatternFlowItem):
+    # Matches packets previously marked (by id) by another flow's MARK action.
+    type = FlowItemType.MARK
+    """
+    +----------+----------+---------------------------+
+    | Field    | Subfield | Value                     |
+    +==========+==========+===========================+
+    | ``spec`` | ``id``   | integer value             |
+    +----------+--------------------------------------+
+    | ``last`` | ``id``   | upper range value         |
+    +----------+----------+---------------------------+
+    | ``mask`` | ``id``   | zeroed to match any value |
+    +----------+----------+---------------------------+
+    """
+
+
+class FlowItemMeta(PatternFlowItem):
+    # Matches application-set metadata rather than a packet header.
+    type = FlowItemType.META
+    """
+    Matches an application specific 32 bit metadata item.
+
+    - Default ``mask`` matches the specified metadata value.
+    """
+
+
+class FlowItemTag(PatternFlowItem):
+    # Matches a tag value set by another flow; not a packet header item.
+    type = FlowItemType.TAG
+    """
+    Matches tag item set by other flows. Multiple tags are supported by specifying
+    ``index``.
+
+    - Default ``mask`` matches the specified tag value and index.
+    +----------+----------+----------------------------------------+
+    | Field    | Subfield  | Value                                 |
+    +==========+===========+=======================================+
+    | ``spec`` | ``data``  | 32 bit flow tag value                 |
+    |          +-----------+---------------------------------------+
+    |          | ``index`` | index of flow tag                     |
+    +----------+-----------+---------------------------------------+
+    | ``last`` | ``data``  | upper range value                     |
+    |          +-----------+---------------------------------------+
+    |          | ``index`` | field is ignored                      |
+    +----------+-----------+---------------------------------------+
+    | ``mask`` | ``data``  | bit-mask applies to "spec" and "last" |
+    |          +-----------+---------------------------------------+
+    |          | ``index`` | field is ignored                      |
+    +----------+-----------+---------------------------------------+
+    """
+
+
+# Maps each flow item type to the PatternFlowItem subclass implementing it.
+# NOTE(review): the values are classes, not instances, so the annotation
+# would more accurately be Dict[FlowItemType, Type[PatternFlowItem]]
+# (left unchanged here in case typing.Type is not imported in this module).
+PATTERN_ITEMS_TYPE_CLASS_MAPPING: Dict[FlowItemType, PatternFlowItem] = {
+    FlowItemType.UDP: FlowItemUdp,
+    FlowItemType.TCP: FlowItemTcp,
+    FlowItemType.SCTP: FlowItemSctp,
+    FlowItemType.IPV4: FlowItemIpv4,
+    FlowItemType.IPV6: FlowItemIpv6,
+    FlowItemType.ETH: FlowItemEth,
+    FlowItemType.VLAN: FlowItemVlan,
+    FlowItemType.VXLAN: FlowItemVxlan,
+    FlowItemType.GRE: FlowItemGre,
+    FlowItemType.VXLAN_GPE: FlowItemVxlan_gpe,
+    FlowItemType.ARP_ETH_IPV4: FlowItemArp_eth_ipv4,
+    FlowItemType.ICMP: FlowItemIcmp,
+    FlowItemType.ICMP6: FlowItemIcmp6,
+    FlowItemType.MARK: FlowItemMark,
+    FlowItemType.META: FlowItemMeta,
+    FlowItemType.TAG: FlowItemTag,
+    FlowItemType.FUZZY: FlowItemFuzzy,
+    FlowItemType.END: FlowItemEnd,
+    FlowItemType.VOID: FlowItemVoid,
+    FlowItemType.INVERT: FlowItemInvert,
+    FlowItemType.ANY: FlowItemAny,
+    FlowItemType.RAW: FlowItemRaw,
+}
+
+# Maps each flow item type to the scapy layer class used to build test
+# packets. Entries mapped to None have no scapy equivalent (metadata,
+# operation and marker items).
+# NOTE(review): VXLAN_GPE reuses scapy's VXLAN class and ICMP6 reuses ICMP —
+# confirm these substitutions are intentional. As above, the value type is a
+# class, so Optional[Type[Packet]] would be the accurate annotation.
+ITEM_TYPE_SCAPY_CLASS_MAPPING: Dict[FlowItemType, Packet] = {
+    FlowItemType.UDP: UDP,
+    FlowItemType.TCP: TCP,
+    FlowItemType.SCTP: SCTP,
+    FlowItemType.IPV4: IP,
+    FlowItemType.IPV6: IPv6,
+    FlowItemType.ETH: Ether,
+    FlowItemType.VLAN: Dot1Q,
+    FlowItemType.VXLAN: VXLAN,
+    FlowItemType.GRE: GRE,
+    FlowItemType.VXLAN_GPE: VXLAN,
+    FlowItemType.ARP_ETH_IPV4: ARP,  # The type rules prevent this from being under anything except Ether / IPv4
+    FlowItemType.ICMP: ICMP,
+    FlowItemType.ICMP6: ICMP,
+    FlowItemType.MARK: None,
+    FlowItemType.META: None,
+    FlowItemType.TAG: None,
+    FlowItemType.FUZZY: None,
+    FlowItemType.END: None,
+    FlowItemType.VOID: None,
+    FlowItemType.INVERT: None,
+    FlowItemType.ANY: None,
+    FlowItemType.RAW: None,
+}
+
+# Items that encapsulate further protocol headers (tunnels/tags).
+TUNNELING_PROTOCOLS = {FlowItemVlan, FlowItemVxlan, FlowItemGre, FlowItemVxlan_gpe}
+
+# Items that are operations/metadata matches rather than protocol headers.
+PATTERN_OPERATIONS = {
+    FlowItemMark,
+    FlowItemMeta,
+    FlowItemTag,
+    FlowItemFuzzy,
+    FlowItemInvert,
+}
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 22/23] dts: merge DTS framework/flow/flow_rule.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (20 preceding siblings ...)
2022-04-06 15:19 ` [RFC PATCH v1 21/23] dts: merge DTS framework/flow/flow_pattern_items.py " Juraj Linkeš
@ 2022-04-06 15:19 ` Juraj Linkeš
2022-04-06 15:19 ` [RFC PATCH v1 23/23] dts: merge DTS framework/flow/generator.py " Juraj Linkeš
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:19 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/flow/flow_rule.py | 65 +++++++++++++++++++++++++++++++++
1 file changed, 65 insertions(+)
create mode 100644 dts/framework/flow/flow_rule.py
diff --git a/dts/framework/flow/flow_rule.py b/dts/framework/flow/flow_rule.py
new file mode 100644
index 0000000000..6687bffe57
--- /dev/null
+++ b/dts/framework/flow/flow_rule.py
@@ -0,0 +1,65 @@
+# BSD LICENSE
+#
+# Copyright(c) 2020 Intel Corporation. All rights reserved.
+# Copyright © 2018[, 2019] The University of New Hampshire. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from typing import Union
+
+import framework.flow.flow_action_items as flow_action_items
+
+from .enums import *
+from .flow import Flow
+
+
+class FlowPattern(Flow):
+    # The pattern clause of a flow rule; a pattern may begin with an
+    # Ethernet header or a fuzzy match item.
+    entry_points = {FlowItemType.ETH, FlowItemType.FUZZY}
+
+    def __str__(self):
+        # Render as a testpmd pattern clause, e.g. "pattern eth / ipv4 / end".
+        return f"pattern {super(FlowPattern, self).__str__()} / end"
+
+
+class FlowActions(Flow):
+    # The action clause of a flow rule; valid starting items are defined by
+    # the action-items module.
+    entry_points = flow_action_items.ENTRY_POINTS
+
+    def __str__(self):
+        # NOTE(review): testpmd's flow command uses the keyword "actions";
+        # confirm whether the singular "action" here is intentional.
+        return f"action {super(FlowActions, self).__str__()} / end"
+
+
+class FlowRule(object):
+    # Declarative description of a complete flow rule. This class only
+    # declares attribute annotations; no behavior is defined here.
+    port: int
+    # group/priority may be left unset (None).
+    group: Union[int, None]
+    priority: Union[int, None]
+
+    # Rule attribute/direction flags.
+    ingress: bool
+    egress: bool
+    transfer: bool
+
+    # The pattern and action clauses making up the rule body.
+    pattern: FlowPattern
+    actions: FlowActions
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [RFC PATCH v1 23/23] dts: merge DTS framework/flow/generator.py to DPDK
2022-04-06 15:18 [RFC PATCH v1 00/23] merge DTS test resource files to DPDK Juraj Linkeš
` (21 preceding siblings ...)
2022-04-06 15:19 ` [RFC PATCH v1 22/23] dts: merge DTS framework/flow/flow_rule.py " Juraj Linkeš
@ 2022-04-06 15:19 ` Juraj Linkeš
22 siblings, 0 replies; 24+ messages in thread
From: Juraj Linkeš @ 2022-04-06 15:19 UTC (permalink / raw)
To: thomas, david.marchand, Honnappa.Nagarahalli, ohilyard, lijuan.tu
Cc: dev, Juraj Linkeš
---
dts/framework/flow/generator.py | 204 ++++++++++++++++++++++++++++++++
1 file changed, 204 insertions(+)
create mode 100644 dts/framework/flow/generator.py
diff --git a/dts/framework/flow/generator.py b/dts/framework/flow/generator.py
new file mode 100644
index 0000000000..c2bde76e53
--- /dev/null
+++ b/dts/framework/flow/generator.py
@@ -0,0 +1,204 @@
+# BSD LICENSE
+#
+# Copyright(c) 2020 Intel Corporation. All rights reserved.
+# Copyright © 2018[, 2019] The University of New Hampshire. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import annotations
+
+import os
+import sys
+from typing import FrozenSet, Generator, Iterable, List, Set, Tuple
+
+# Put the framework's parent directory on sys.path so imports resolve when
+# this module is executed directly (see main()).
+# NOTE(review): the module also uses relative imports below, which require
+# package context regardless — confirm direct execution actually works.
+path = os.path.dirname(os.path.dirname(__file__))
+if path not in sys.path:
+    sys.path.append(path)
+
+from .flow import Flow
+from .flow_pattern_items import (
+ ALWAYS_ALLOWED_ITEMS,
+ L3_FLOW_TYPES,
+ PATTERN_ITEMS_TYPE_CLASS_MAPPING,
+ PATTERN_OPERATION_TYPES,
+ FlowItemEth,
+ FlowItemGre,
+ FlowItemIpv4,
+ FlowItemUdp,
+ FlowItemVxlan,
+ PatternFlowItem,
+)
+from .flow_rule import FlowItemType
+
+
+def get_valid_next_protocols(current_protocol, protocol_stack, type_denylist):
+    # Return current_protocol's valid parent item types, excluding any type
+    # that is denylisted or already present in the protocol stack.
+    # NOTE(review): "patent_item" is presumably a typo for "parent_item"
+    # (purely a local lambda name, so behavior is unaffected).
+    return list(
+        filter(
+            lambda patent_item: patent_item not in type_denylist
+            and patent_item not in {p.type for p in protocol_stack},
+            current_protocol.valid_parent_items,
+        )
+    )
+
+
+def _generate(type_denylist=None) -> List[List[PatternFlowItem]]:
+    # Build one protocol stack per non-denylisted pattern item class by
+    # starting at that item and repeatedly moving to a valid parent item
+    # until no parent remains. Returns the stacks lowest-level-first.
+    if type_denylist is None:
+        type_denylist = set()
+    UNUSED_PATTERN_ITEMS = {PATTERN_ITEMS_TYPE_CLASS_MAPPING[i] for i in type_denylist}
+
+    patterns: List[List[PatternFlowItem]] = []
+    for pattern_item in [
+        clazz
+        for clazz in PATTERN_ITEMS_TYPE_CLASS_MAPPING.values()
+        if clazz not in UNUSED_PATTERN_ITEMS
+    ]:
+        protocol_stack = []
+        # NOTE(review): protocol_stack was just initialized to [], so this
+        # count can never reach 2 — the check appears to be dead code.
+        if protocol_stack.count(pattern_item) >= 2:
+            continue
+
+        current_protocol = pattern_item()
+        valid_next_protocols = get_valid_next_protocols(
+            current_protocol, protocol_stack, type_denylist
+        )
+        # Climb toward the outermost protocol, pushing each level onto the
+        # stack; always follows the first valid parent found.
+        while len(valid_next_protocols) > 0:
+            protocol_stack.append(current_protocol)
+            current_protocol = PATTERN_ITEMS_TYPE_CLASS_MAPPING[
+                list(valid_next_protocols)[0]
+            ]()
+            valid_next_protocols = get_valid_next_protocols(
+                current_protocol, protocol_stack, type_denylist
+            )
+
+        protocol_stack.append(current_protocol)
+
+        patterns.append(
+            list(reversed(protocol_stack))
+        )  # This will place the lowest level protocols first
+    return patterns
+
+
+def convert_protocol_stack_to_flow_pattern(protocol_stack):
+    # Wrap an ordered list of pattern items in a Flow object.
+    return Flow(pattern_items=protocol_stack)
+
+
+def _get_patterns_with_type_denylist(type_denylist: Set):
+    # Generate all protocol stacks excluding the denylisted item types and
+    # convert each stack into a Flow pattern.
+    return [
+        convert_protocol_stack_to_flow_pattern(protocol_stack)
+        for protocol_stack in (_generate(type_denylist=type_denylist))
+    ]
+
+
+def _get_normal_protocol_patterns() -> List[Flow]:
+    # Patterns built only from real protocol headers: exclude the operation
+    # items, the always-allowed filler items, and the ANY/END markers.
+    return _get_patterns_with_type_denylist(
+        PATTERN_OPERATION_TYPES
+        | ALWAYS_ALLOWED_ITEMS
+        | {FlowItemType.ANY, FlowItemType.END}
+    )
+
+
+def _get_tunnelled_protocol_patterns(patterns: List[Flow]) -> Generator[Flow, None, None]:
+    # Yield each pattern encapsulated in a VXLAN tunnel, then — for patterns
+    # whose second item is an L3 header — encapsulated in a GRE tunnel.
+    # (Annotation fixed: typing.Generator takes yield/send/return parameters.)
+    VXLAN_FLOW = Flow(
+        pattern_items=[FlowItemEth(), FlowItemIpv4(), FlowItemUdp(), FlowItemVxlan()]
+    )
+    for pattern in patterns:
+        yield VXLAN_FLOW / pattern
+
+    # GRE carries L3 directly, so only patterns with an L3 item at index 1
+    # (right after the Ethernet header) can be GRE-tunnelled.
+    GRE_FLOW = Flow(pattern_items=[FlowItemEth(), FlowItemIpv4(), FlowItemGre()])
+    for pattern in patterns:
+        if len(pattern.pattern_items) >= 2:
+            if pattern.pattern_items[1].type in L3_FLOW_TYPES:
+                yield GRE_FLOW / pattern
+
+
+def get_patterns() -> Iterable[Flow]:
+    # Return the flow patterns to test. (Annotation fixed: the function
+    # returns a flat list of Flow objects — see the `patterns` annotation
+    # below — not nested iterables.)
+    patterns: List[Flow] = _get_normal_protocol_patterns()
+
+    # The flow with only an ethernet header was a consequence of the
+    # generation algorithm, but isn't that useful to test since we can't
+    # create a failing case without getting each NIC to write arbitrary
+    # bytes over the link.
+    eth_only_flow = Flow(pattern_items=[FlowItemEth()])
+    patterns.remove(eth_only_flow)
+
+    # tunnelled_patterns = _get_tunnelled_protocol_patterns(patterns)
+
+    return patterns
+
+
+def add_properties_to_patterns(
+    patterns: Iterable[Flow],
+) -> Iterable[Tuple[Flow, FrozenSet[str], FrozenSet[str], str]]:
+    # Flatten each flow's test-property iterator into a single stream of
+    # (flow, matching packets, non-matching packets, test name) tuples.
+    test_property_flow_iters = map(lambda f: f.get_test_property_flows(), patterns)
+    for iterator in test_property_flow_iters:
+        yield from iterator
+
+
+def get_patterns_with_properties() -> Iterable[
+    Tuple[Flow, FrozenSet[str], FrozenSet[str], str]
+]:
+    # Convenience wrapper: generate the base patterns and attach their
+    # per-property test configurations.
+    base_patterns = get_patterns()
+    return add_properties_to_patterns(base_patterns)
+
+
+def create_test_function_strings(
+    test_configurations: Iterable[Tuple[Flow, FrozenSet[str], FrozenSet[str], str]]
+) -> Iterable[str]:
+    """
+    This will break if the __str__ methods of frozenset ever changes or if % formatting syntax is removed.
+
+    @param test_configurations: An iterable with test configurations to convert into test case strings.
+    @return: An iterable containing strings that are function parameters.
+    """
+    # Placeholders, in order: test name, flow pattern string, matching packet
+    # set, non-matching packet set (the sets are substituted via their repr).
+    function_template = """
+def test_%s(self):
+    self.do_test_with_queue_action("%s", %s, %s)
+    """
+    return map(
+        lambda test_configuration: function_template
+        % (
+            test_configuration[-1],
+            test_configuration[0],
+            test_configuration[1],
+            test_configuration[2],
+        ),
+        test_configurations,
+    )
+
+
+def main():
+    """
+    Run this file (python3 generator.py) from the flow directory to print
+    out the pattern functions which are normally automatically generated
+    and added to the RTE Flow test suite at runtime.
+    """
+    pattern_tests = list(get_patterns_with_properties())
+    pattern_functions = create_test_function_strings(pattern_tests)
+    # One generated test function per configuration, newline-separated.
+    print("\n".join(pattern_functions))
+
+
+if __name__ == "__main__":
+    main()
--
2.20.1
^ permalink raw reply [flat|nested] 24+ messages in thread