automatic DPDK test reports
From: qemudev@loongson.cn
To: test-report@dpdk.org
Cc: Zhichao Zeng <zhichaox.zeng@intel.com>, zhoumin@loongson.cn
Subject: |WARNING| pw135607 [21.11.5 v2] net/iavf: support rte flow with mask for FDIR
Date: Wed, 27 Dec 2023 13:15:34 +0800	[thread overview]
Message-ID: <202312270515.3BR5FYgg2592213@localhost.localdomain> (raw)
In-Reply-To: <20231227054408.542661-1-zhichaox.zeng@intel.com>

Test-Label: loongarch-compilation
Test-Status: WARNING
http://dpdk.org/patch/135607

_apply patch failure_

Submitter: Zhichao Zeng <zhichaox.zeng@intel.com>
Date: Wed, 27 Dec 2023 13:44:08 +0800
DPDK git baseline: Repo:dpdk-next-net-intel
  Branch: main
  CommitID: f09b7a8527e265ba9531aa7c10e2452465038b3d

Apply patch set 135607 failed:

Checking patch drivers/common/iavf/virtchnl.h...
error: while searching for:
};

#define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
#define PROTO_HDR_SHIFT			5
#define PROTO_HDR_FIELD_START(proto_hdr_type) \
					(proto_hdr_type << PROTO_HDR_SHIFT)

error: patch failed: drivers/common/iavf/virtchnl.h:1482
error: drivers/common/iavf/virtchnl.h: patch does not apply
Checking patch drivers/net/iavf/iavf_fdir.c...
error: while searching for:
	const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
	const struct rte_flow_item_gre *gre_spec, *gre_mask;
	const struct rte_flow_item *item = pattern;
	struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
	struct rte_ecpri_common_hdr ecpri_common;
	uint64_t input_set = IAVF_INSET_NONE;

error: patch failed: drivers/net/iavf/iavf_fdir.c:695
error: drivers/net/iavf/iavf_fdir.c: patch does not apply
Checking patch drivers/net/iavf/iavf_hash.c...
error: while searching for:
/* proto_hdrs template */
struct virtchnl_proto_hdrs outer_ipv4_tmplt = {
	TUNNEL_LEVEL_OUTER, 4,
	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4}
};

struct virtchnl_proto_hdrs outer_ipv4_udp_tmplt = {
	TUNNEL_LEVEL_OUTER, 5,
	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
	 proto_hdr_ipv4_with_prot,
	 proto_hdr_udp}
};

struct virtchnl_proto_hdrs outer_ipv4_tcp_tmplt = {
	TUNNEL_LEVEL_OUTER, 5,
	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
	 proto_hdr_ipv4_with_prot,
	 proto_hdr_tcp}
};

struct virtchnl_proto_hdrs outer_ipv4_sctp_tmplt = {
	TUNNEL_LEVEL_OUTER, 5,
	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4,
	 proto_hdr_sctp}
};

struct virtchnl_proto_hdrs outer_ipv6_tmplt = {
	TUNNEL_LEVEL_OUTER, 4,
	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6}
};

struct virtchnl_proto_hdrs outer_ipv6_frag_tmplt = {
	TUNNEL_LEVEL_OUTER, 5,
	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
	 proto_hdr_ipv6, proto_hdr_ipv6_frag}
};

struct virtchnl_proto_hdrs outer_ipv6_udp_tmplt = {
	TUNNEL_LEVEL_OUTER, 5,
	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
	 proto_hdr_ipv6_with_prot,
	 proto_hdr_udp}
};

struct virtchnl_proto_hdrs outer_ipv6_tcp_tmplt = {
	TUNNEL_LEVEL_OUTER, 5,
	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
	 proto_hdr_ipv6_with_prot,
	 proto_hdr_tcp}
};

struct virtchnl_proto_hdrs outer_ipv6_sctp_tmplt = {
	TUNNEL_LEVEL_OUTER, 5,
	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6,
	 proto_hdr_sctp}
};

struct virtchnl_proto_hdrs inner_ipv4_tmplt = {
	TUNNEL_LEVEL_INNER, 1, {proto_hdr_ipv4}
};

struct virtchnl_proto_hdrs inner_ipv4_udp_tmplt = {
	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot, proto_hdr_udp}
};

struct virtchnl_proto_hdrs inner_ipv4_tcp_tmplt = {
	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot, proto_hdr_tcp}
};

struct virtchnl_proto_hdrs second_inner_ipv4_tmplt = {
	2, 1, {proto_hdr_ipv4}
};

struct virtchnl_proto_hdrs second_inner_ipv4_udp_tmplt = {
	2, 2, {proto_hdr_ipv4_with_prot, proto_hdr_udp}
};

struct virtchnl_proto_hdrs second_inner_ipv4_tcp_tmplt = {
	2, 2, {proto_hdr_ipv4_with_prot, proto_hdr_tcp}
};

struct virtchnl_proto_hdrs second_inner_ipv6_tmplt = {
	2, 1, {proto_hdr_ipv6}
};

struct virtchnl_proto_hdrs second_inner_ipv6_udp_tmplt = {
	2, 2, {proto_hdr_ipv6_with_prot, proto_hdr_udp}
};

struct virtchnl_proto_hdrs second_inner_ipv6_tcp_tmplt = {
	2, 2, {proto_hdr_ipv6_with_prot, proto_hdr_tcp}
};

struct virtchnl_proto_hdrs inner_ipv4_sctp_tmplt = {
	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4, proto_hdr_sctp}
};

struct virtchnl_proto_hdrs inner_ipv6_tmplt = {
	TUNNEL_LEVEL_INNER, 1, {proto_hdr_ipv6}
};

struct virtchnl_proto_hdrs inner_ipv6_udp_tmplt = {
	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot, proto_hdr_udp}
};

struct virtchnl_proto_hdrs inner_ipv6_tcp_tmplt = {
	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot, proto_hdr_tcp}
};

struct virtchnl_proto_hdrs inner_ipv6_sctp_tmplt = {
	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6, proto_hdr_sctp}
};

struct virtchnl_proto_hdrs ipv4_esp_tmplt = {
	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_esp}
};

struct virtchnl_proto_hdrs ipv4_udp_esp_tmplt = {
	TUNNEL_LEVEL_OUTER, 3,
	{proto_hdr_ipv4, proto_hdr_udp, proto_hdr_esp}
};

struct virtchnl_proto_hdrs ipv4_ah_tmplt = {
	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_ah}
};

struct virtchnl_proto_hdrs ipv6_esp_tmplt = {
	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_esp}
};

struct virtchnl_proto_hdrs ipv6_udp_esp_tmplt = {
	TUNNEL_LEVEL_OUTER, 3,
	{proto_hdr_ipv6, proto_hdr_udp, proto_hdr_esp}
};

struct virtchnl_proto_hdrs ipv6_ah_tmplt = {
	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_ah}
};

struct virtchnl_proto_hdrs ipv4_l2tpv3_tmplt = {
	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_l2tpv3}
};

struct virtchnl_proto_hdrs ipv6_l2tpv3_tmplt = {
	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_l2tpv3}
};

struct virtchnl_proto_hdrs ipv4_pfcp_tmplt = {
	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_pfcp}
};

struct virtchnl_proto_hdrs i
error: patch failed: drivers/net/iavf/iavf_hash.c:178
error: drivers/net/iavf/iavf_hash.c: patch does not apply
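
The failure can usually be reproduced locally by checking out the baseline
commit listed above and attempting the same apply. Note that the subject tag
[21.11.5 v2] suggests the series targets the 21.11 LTS tree, so a context
mismatch against the main branch of dpdk-next-net-intel is plausible. The
commands below are only a minimal sketch; the clone URL and the "/mbox/"
suffix follow the usual dpdk.org / patchwork conventions and are assumptions,
not taken from this report:

  git clone https://dpdk.org/git/next/dpdk-next-net-intel   # assumed clone URL
  cd dpdk-next-net-intel
  git checkout f09b7a8527e265ba9531aa7c10e2452465038b3d      # baseline from this report
  # fetch the patch as an mbox (standard patchwork /mbox/ suffix assumed)
  curl -L -o pw135607.mbox http://dpdk.org/patch/135607/mbox/
  # -3 lets git fall back to a 3-way merge when the surrounding context differs
  git am -3 pw135607.mbox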


Thread overview: 2+ messages
     [not found] <20231227054408.542661-1-zhichaox.zeng@intel.com>
2023-12-27  5:15 ` qemudev [this message]
2023-12-27  5:35 ` checkpatch

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=202312270515.3BR5FYgg2592213@localhost.localdomain \
    --to=qemudev@loongson.cn \
    --cc=test-report@dpdk.org \
    --cc=zhichaox.zeng@intel.com \
    --cc=zhoumin@loongson.cn \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

  Be sure your reply has a Subject: header at the top and a blank line
  before the message body.