DPDK patches and discussions
 help / color / mirror / Atom feed
* [RFC] net/i40e: backport i40e fixes and share code to 20.11.4
@ 2022-03-28  8:04 Steve Yang
  2022-04-07  6:11 ` [RFC v2] " Steve Yang
  0 siblings, 1 reply; 2+ messages in thread
From: Steve Yang @ 2022-03-28  8:04 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Steve Yang

Backport all the i40e share code to 20.11.4 based on
cid-i40e.2022.03.08.

Backport all DPDK fixes of i40e and iavf to 20.11.4 from 22.03.

Signed-off-by: Steve Yang <stevex.yang@intel.com>
---
 drivers/common/iavf/README                  |   4 +-
 drivers/common/iavf/iavf_adminq.c           |   2 +-
 drivers/common/iavf/iavf_adminq.h           |   2 +-
 drivers/common/iavf/iavf_adminq_cmd.h       |   2 +-
 drivers/common/iavf/iavf_alloc.h            |   2 +-
 drivers/common/iavf/iavf_common.c           | 863 ++++++++++++++++-
 drivers/common/iavf/iavf_devids.h           |   3 +-
 drivers/common/iavf/iavf_impl.c             |   2 +-
 drivers/common/iavf/iavf_osdep.h            |   7 +-
 drivers/common/iavf/iavf_prototype.h        |   6 +-
 drivers/common/iavf/iavf_register.h         |   2 +-
 drivers/common/iavf/iavf_status.h           |   4 +-
 drivers/common/iavf/iavf_type.h             |  92 +-
 drivers/common/iavf/meson.build             |   2 +-
 drivers/common/iavf/siov_regs.h             |  47 +
 drivers/common/iavf/virtchnl.h              | 981 ++++++++++++++++++--
 drivers/common/iavf/virtchnl_inline_ipsec.h | 562 +++++++++++
 drivers/net/i40e/base/README                |   2 +-
 drivers/net/i40e/base/i40e_adminq.c         |  73 +-
 drivers/net/i40e/base/i40e_adminq_cmd.h     |  49 +-
 drivers/net/i40e/base/i40e_common.c         | 185 +++-
 drivers/net/i40e/base/i40e_dcb.c            |   8 +-
 drivers/net/i40e/base/i40e_devids.h         |   2 +
 drivers/net/i40e/base/i40e_osdep.h          |   2 +-
 drivers/net/i40e/base/i40e_prototype.h      |  18 +
 drivers/net/i40e/base/i40e_register.h       |  55 +-
 drivers/net/i40e/base/i40e_status.h         |   2 +-
 drivers/net/i40e/base/i40e_type.h           |  25 +-
 drivers/net/i40e/i40e_ethdev.c              |  63 +-
 drivers/net/i40e/i40e_ethdev_vf.c           |   6 +-
 drivers/net/i40e/i40e_regs.h                |  11 +-
 drivers/net/i40e/i40e_rxtx.c                |   6 +-
 drivers/net/iavf/iavf_ethdev.c              |  47 +-
 drivers/net/iavf/iavf_rxtx.c                | 103 +-
 drivers/net/iavf/iavf_rxtx.h                |  40 +-
 drivers/net/iavf/iavf_rxtx_vec_avx2.c       |   2 +-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c     |   4 +-
 drivers/net/iavf/iavf_rxtx_vec_common.h     |   2 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c        |   4 +-
 drivers/net/iavf/iavf_vchnl.c               |   5 +-
 drivers/net/ice/ice_dcf.c                   |   4 +-
 41 files changed, 3001 insertions(+), 300 deletions(-)
 create mode 100644 drivers/common/iavf/siov_regs.h
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/README b/drivers/common/iavf/README
index 5a42750465..f59839ff97 100644
--- a/drivers/common/iavf/README
+++ b/drivers/common/iavf/README
@@ -1,12 +1,12 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2020 Intel Corporation
+ * Copyright(c) 2019-2021 Intel Corporation
  */
 
 Intel® IAVF driver
 =================
 
 This directory contains source code of FreeBSD IAVF driver of version
-cid-avf.2020.10.14.tar.gz released by the team which develops
+cid-avf.2022.03.08.tar.gz released by the team which develops
 basic drivers for any IAVF NIC. The directory of base/ contains the
 original source package.
 
diff --git a/drivers/common/iavf/iavf_adminq.c b/drivers/common/iavf/iavf_adminq.c
index 8d03de0553..9c36e8908e 100644
--- a/drivers/common/iavf/iavf_adminq.c
+++ b/drivers/common/iavf/iavf_adminq.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #include "iavf_status.h"
diff --git a/drivers/common/iavf/iavf_adminq.h b/drivers/common/iavf/iavf_adminq.h
index 93214162eb..e2374f9b95 100644
--- a/drivers/common/iavf/iavf_adminq.h
+++ b/drivers/common/iavf/iavf_adminq.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_ADMINQ_H_
diff --git a/drivers/common/iavf/iavf_adminq_cmd.h b/drivers/common/iavf/iavf_adminq_cmd.h
index 5b748426ad..2a3006a526 100644
--- a/drivers/common/iavf/iavf_adminq_cmd.h
+++ b/drivers/common/iavf/iavf_adminq_cmd.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_ADMINQ_CMD_H_
diff --git a/drivers/common/iavf/iavf_alloc.h b/drivers/common/iavf/iavf_alloc.h
index 7b7a205cff..6ef8da65a1 100644
--- a/drivers/common/iavf/iavf_alloc.h
+++ b/drivers/common/iavf/iavf_alloc.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_ALLOC_H_
diff --git a/drivers/common/iavf/iavf_common.c b/drivers/common/iavf/iavf_common.c
index c951b7d787..03872dece8 100644
--- a/drivers/common/iavf/iavf_common.c
+++ b/drivers/common/iavf/iavf_common.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #include "iavf_type.h"
@@ -135,8 +135,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
 		return "IAVF_ERR_INVALID_MAC_ADDR";
 	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
 		return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
-	case IAVF_ERR_MASTER_REQUESTS_PENDING:
-		return "IAVF_ERR_MASTER_REQUESTS_PENDING";
+	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
+		return "IAVF_ERR_PRIMARY_REQUESTS_PENDING";
 	case IAVF_ERR_INVALID_LINK_SETTINGS:
 		return "IAVF_ERR_INVALID_LINK_SETTINGS";
 	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
@@ -520,9 +520,9 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
 	return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
 }
 
-/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
+/* The iavf_ptype_lookup table is used to convert from the 8-bit and 10-bit
+ * ptype in the hardware to a bit-field that can be used by SW to more easily
+ * determine the packet type.
  *
  * Macros are used to shorten the table lines and make this table human
  * readable.
@@ -882,7 +882,852 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
 	IAVF_PTT_UNUSED_ENTRY(252),
 	IAVF_PTT_UNUSED_ENTRY(253),
 	IAVF_PTT_UNUSED_ENTRY(254),
-	IAVF_PTT_UNUSED_ENTRY(255)
+	IAVF_PTT_UNUSED_ENTRY(255),
+	IAVF_PTT_UNUSED_ENTRY(256),
+	IAVF_PTT_UNUSED_ENTRY(257),
+	IAVF_PTT_UNUSED_ENTRY(258),
+	IAVF_PTT_UNUSED_ENTRY(259),
+
+	IAVF_PTT_UNUSED_ENTRY(260),
+	IAVF_PTT_UNUSED_ENTRY(261),
+	IAVF_PTT_UNUSED_ENTRY(262),
+	IAVF_PTT_UNUSED_ENTRY(263),
+	IAVF_PTT_UNUSED_ENTRY(264),
+	IAVF_PTT_UNUSED_ENTRY(265),
+	IAVF_PTT_UNUSED_ENTRY(266),
+	IAVF_PTT_UNUSED_ENTRY(267),
+	IAVF_PTT_UNUSED_ENTRY(268),
+	IAVF_PTT_UNUSED_ENTRY(269),
+
+	IAVF_PTT_UNUSED_ENTRY(270),
+	IAVF_PTT_UNUSED_ENTRY(271),
+	IAVF_PTT_UNUSED_ENTRY(272),
+	IAVF_PTT_UNUSED_ENTRY(273),
+	IAVF_PTT_UNUSED_ENTRY(274),
+	IAVF_PTT_UNUSED_ENTRY(275),
+	IAVF_PTT_UNUSED_ENTRY(276),
+	IAVF_PTT_UNUSED_ENTRY(277),
+	IAVF_PTT_UNUSED_ENTRY(278),
+	IAVF_PTT_UNUSED_ENTRY(279),
+
+	IAVF_PTT_UNUSED_ENTRY(280),
+	IAVF_PTT_UNUSED_ENTRY(281),
+	IAVF_PTT_UNUSED_ENTRY(282),
+	IAVF_PTT_UNUSED_ENTRY(283),
+	IAVF_PTT_UNUSED_ENTRY(284),
+	IAVF_PTT_UNUSED_ENTRY(285),
+	IAVF_PTT_UNUSED_ENTRY(286),
+	IAVF_PTT_UNUSED_ENTRY(287),
+	IAVF_PTT_UNUSED_ENTRY(288),
+	IAVF_PTT_UNUSED_ENTRY(289),
+
+	IAVF_PTT_UNUSED_ENTRY(290),
+	IAVF_PTT_UNUSED_ENTRY(291),
+	IAVF_PTT_UNUSED_ENTRY(292),
+	IAVF_PTT_UNUSED_ENTRY(293),
+	IAVF_PTT_UNUSED_ENTRY(294),
+	IAVF_PTT_UNUSED_ENTRY(295),
+	IAVF_PTT_UNUSED_ENTRY(296),
+	IAVF_PTT_UNUSED_ENTRY(297),
+	IAVF_PTT_UNUSED_ENTRY(298),
+	IAVF_PTT_UNUSED_ENTRY(299),
+
+	IAVF_PTT_UNUSED_ENTRY(300),
+	IAVF_PTT_UNUSED_ENTRY(301),
+	IAVF_PTT_UNUSED_ENTRY(302),
+	IAVF_PTT_UNUSED_ENTRY(303),
+	IAVF_PTT_UNUSED_ENTRY(304),
+	IAVF_PTT_UNUSED_ENTRY(305),
+	IAVF_PTT_UNUSED_ENTRY(306),
+	IAVF_PTT_UNUSED_ENTRY(307),
+	IAVF_PTT_UNUSED_ENTRY(308),
+	IAVF_PTT_UNUSED_ENTRY(309),
+
+	IAVF_PTT_UNUSED_ENTRY(310),
+	IAVF_PTT_UNUSED_ENTRY(311),
+	IAVF_PTT_UNUSED_ENTRY(312),
+	IAVF_PTT_UNUSED_ENTRY(313),
+	IAVF_PTT_UNUSED_ENTRY(314),
+	IAVF_PTT_UNUSED_ENTRY(315),
+	IAVF_PTT_UNUSED_ENTRY(316),
+	IAVF_PTT_UNUSED_ENTRY(317),
+	IAVF_PTT_UNUSED_ENTRY(318),
+	IAVF_PTT_UNUSED_ENTRY(319),
+
+	IAVF_PTT_UNUSED_ENTRY(320),
+	IAVF_PTT_UNUSED_ENTRY(321),
+	IAVF_PTT_UNUSED_ENTRY(322),
+	IAVF_PTT_UNUSED_ENTRY(323),
+	IAVF_PTT_UNUSED_ENTRY(324),
+	IAVF_PTT_UNUSED_ENTRY(325),
+	IAVF_PTT_UNUSED_ENTRY(326),
+	IAVF_PTT_UNUSED_ENTRY(327),
+	IAVF_PTT_UNUSED_ENTRY(328),
+	IAVF_PTT_UNUSED_ENTRY(329),
+
+	IAVF_PTT_UNUSED_ENTRY(330),
+	IAVF_PTT_UNUSED_ENTRY(331),
+	IAVF_PTT_UNUSED_ENTRY(332),
+	IAVF_PTT_UNUSED_ENTRY(333),
+	IAVF_PTT_UNUSED_ENTRY(334),
+	IAVF_PTT_UNUSED_ENTRY(335),
+	IAVF_PTT_UNUSED_ENTRY(336),
+	IAVF_PTT_UNUSED_ENTRY(337),
+	IAVF_PTT_UNUSED_ENTRY(338),
+	IAVF_PTT_UNUSED_ENTRY(339),
+
+	IAVF_PTT_UNUSED_ENTRY(340),
+	IAVF_PTT_UNUSED_ENTRY(341),
+	IAVF_PTT_UNUSED_ENTRY(342),
+	IAVF_PTT_UNUSED_ENTRY(343),
+	IAVF_PTT_UNUSED_ENTRY(344),
+	IAVF_PTT_UNUSED_ENTRY(345),
+	IAVF_PTT_UNUSED_ENTRY(346),
+	IAVF_PTT_UNUSED_ENTRY(347),
+	IAVF_PTT_UNUSED_ENTRY(348),
+	IAVF_PTT_UNUSED_ENTRY(349),
+
+	IAVF_PTT_UNUSED_ENTRY(350),
+	IAVF_PTT_UNUSED_ENTRY(351),
+	IAVF_PTT_UNUSED_ENTRY(352),
+	IAVF_PTT_UNUSED_ENTRY(353),
+	IAVF_PTT_UNUSED_ENTRY(354),
+	IAVF_PTT_UNUSED_ENTRY(355),
+	IAVF_PTT_UNUSED_ENTRY(356),
+	IAVF_PTT_UNUSED_ENTRY(357),
+	IAVF_PTT_UNUSED_ENTRY(358),
+	IAVF_PTT_UNUSED_ENTRY(359),
+
+	IAVF_PTT_UNUSED_ENTRY(360),
+	IAVF_PTT_UNUSED_ENTRY(361),
+	IAVF_PTT_UNUSED_ENTRY(362),
+	IAVF_PTT_UNUSED_ENTRY(363),
+	IAVF_PTT_UNUSED_ENTRY(364),
+	IAVF_PTT_UNUSED_ENTRY(365),
+	IAVF_PTT_UNUSED_ENTRY(366),
+	IAVF_PTT_UNUSED_ENTRY(367),
+	IAVF_PTT_UNUSED_ENTRY(368),
+	IAVF_PTT_UNUSED_ENTRY(369),
+
+	IAVF_PTT_UNUSED_ENTRY(370),
+	IAVF_PTT_UNUSED_ENTRY(371),
+	IAVF_PTT_UNUSED_ENTRY(372),
+	IAVF_PTT_UNUSED_ENTRY(373),
+	IAVF_PTT_UNUSED_ENTRY(374),
+	IAVF_PTT_UNUSED_ENTRY(375),
+	IAVF_PTT_UNUSED_ENTRY(376),
+	IAVF_PTT_UNUSED_ENTRY(377),
+	IAVF_PTT_UNUSED_ENTRY(378),
+	IAVF_PTT_UNUSED_ENTRY(379),
+
+	IAVF_PTT_UNUSED_ENTRY(380),
+	IAVF_PTT_UNUSED_ENTRY(381),
+	IAVF_PTT_UNUSED_ENTRY(382),
+	IAVF_PTT_UNUSED_ENTRY(383),
+	IAVF_PTT_UNUSED_ENTRY(384),
+	IAVF_PTT_UNUSED_ENTRY(385),
+	IAVF_PTT_UNUSED_ENTRY(386),
+	IAVF_PTT_UNUSED_ENTRY(387),
+	IAVF_PTT_UNUSED_ENTRY(388),
+	IAVF_PTT_UNUSED_ENTRY(389),
+
+	IAVF_PTT_UNUSED_ENTRY(390),
+	IAVF_PTT_UNUSED_ENTRY(391),
+	IAVF_PTT_UNUSED_ENTRY(392),
+	IAVF_PTT_UNUSED_ENTRY(393),
+	IAVF_PTT_UNUSED_ENTRY(394),
+	IAVF_PTT_UNUSED_ENTRY(395),
+	IAVF_PTT_UNUSED_ENTRY(396),
+	IAVF_PTT_UNUSED_ENTRY(397),
+	IAVF_PTT_UNUSED_ENTRY(398),
+	IAVF_PTT_UNUSED_ENTRY(399),
+
+	IAVF_PTT_UNUSED_ENTRY(400),
+	IAVF_PTT_UNUSED_ENTRY(401),
+	IAVF_PTT_UNUSED_ENTRY(402),
+	IAVF_PTT_UNUSED_ENTRY(403),
+	IAVF_PTT_UNUSED_ENTRY(404),
+	IAVF_PTT_UNUSED_ENTRY(405),
+	IAVF_PTT_UNUSED_ENTRY(406),
+	IAVF_PTT_UNUSED_ENTRY(407),
+	IAVF_PTT_UNUSED_ENTRY(408),
+	IAVF_PTT_UNUSED_ENTRY(409),
+
+	IAVF_PTT_UNUSED_ENTRY(410),
+	IAVF_PTT_UNUSED_ENTRY(411),
+	IAVF_PTT_UNUSED_ENTRY(412),
+	IAVF_PTT_UNUSED_ENTRY(413),
+	IAVF_PTT_UNUSED_ENTRY(414),
+	IAVF_PTT_UNUSED_ENTRY(415),
+	IAVF_PTT_UNUSED_ENTRY(416),
+	IAVF_PTT_UNUSED_ENTRY(417),
+	IAVF_PTT_UNUSED_ENTRY(418),
+	IAVF_PTT_UNUSED_ENTRY(419),
+
+	IAVF_PTT_UNUSED_ENTRY(420),
+	IAVF_PTT_UNUSED_ENTRY(421),
+	IAVF_PTT_UNUSED_ENTRY(422),
+	IAVF_PTT_UNUSED_ENTRY(423),
+	IAVF_PTT_UNUSED_ENTRY(424),
+	IAVF_PTT_UNUSED_ENTRY(425),
+	IAVF_PTT_UNUSED_ENTRY(426),
+	IAVF_PTT_UNUSED_ENTRY(427),
+	IAVF_PTT_UNUSED_ENTRY(428),
+	IAVF_PTT_UNUSED_ENTRY(429),
+
+	IAVF_PTT_UNUSED_ENTRY(430),
+	IAVF_PTT_UNUSED_ENTRY(431),
+	IAVF_PTT_UNUSED_ENTRY(432),
+	IAVF_PTT_UNUSED_ENTRY(433),
+	IAVF_PTT_UNUSED_ENTRY(434),
+	IAVF_PTT_UNUSED_ENTRY(435),
+	IAVF_PTT_UNUSED_ENTRY(436),
+	IAVF_PTT_UNUSED_ENTRY(437),
+	IAVF_PTT_UNUSED_ENTRY(438),
+	IAVF_PTT_UNUSED_ENTRY(439),
+
+	IAVF_PTT_UNUSED_ENTRY(440),
+	IAVF_PTT_UNUSED_ENTRY(441),
+	IAVF_PTT_UNUSED_ENTRY(442),
+	IAVF_PTT_UNUSED_ENTRY(443),
+	IAVF_PTT_UNUSED_ENTRY(444),
+	IAVF_PTT_UNUSED_ENTRY(445),
+	IAVF_PTT_UNUSED_ENTRY(446),
+	IAVF_PTT_UNUSED_ENTRY(447),
+	IAVF_PTT_UNUSED_ENTRY(448),
+	IAVF_PTT_UNUSED_ENTRY(449),
+
+	IAVF_PTT_UNUSED_ENTRY(450),
+	IAVF_PTT_UNUSED_ENTRY(451),
+	IAVF_PTT_UNUSED_ENTRY(452),
+	IAVF_PTT_UNUSED_ENTRY(453),
+	IAVF_PTT_UNUSED_ENTRY(454),
+	IAVF_PTT_UNUSED_ENTRY(455),
+	IAVF_PTT_UNUSED_ENTRY(456),
+	IAVF_PTT_UNUSED_ENTRY(457),
+	IAVF_PTT_UNUSED_ENTRY(458),
+	IAVF_PTT_UNUSED_ENTRY(459),
+
+	IAVF_PTT_UNUSED_ENTRY(460),
+	IAVF_PTT_UNUSED_ENTRY(461),
+	IAVF_PTT_UNUSED_ENTRY(462),
+	IAVF_PTT_UNUSED_ENTRY(463),
+	IAVF_PTT_UNUSED_ENTRY(464),
+	IAVF_PTT_UNUSED_ENTRY(465),
+	IAVF_PTT_UNUSED_ENTRY(466),
+	IAVF_PTT_UNUSED_ENTRY(467),
+	IAVF_PTT_UNUSED_ENTRY(468),
+	IAVF_PTT_UNUSED_ENTRY(469),
+
+	IAVF_PTT_UNUSED_ENTRY(470),
+	IAVF_PTT_UNUSED_ENTRY(471),
+	IAVF_PTT_UNUSED_ENTRY(472),
+	IAVF_PTT_UNUSED_ENTRY(473),
+	IAVF_PTT_UNUSED_ENTRY(474),
+	IAVF_PTT_UNUSED_ENTRY(475),
+	IAVF_PTT_UNUSED_ENTRY(476),
+	IAVF_PTT_UNUSED_ENTRY(477),
+	IAVF_PTT_UNUSED_ENTRY(478),
+	IAVF_PTT_UNUSED_ENTRY(479),
+
+	IAVF_PTT_UNUSED_ENTRY(480),
+	IAVF_PTT_UNUSED_ENTRY(481),
+	IAVF_PTT_UNUSED_ENTRY(482),
+	IAVF_PTT_UNUSED_ENTRY(483),
+	IAVF_PTT_UNUSED_ENTRY(484),
+	IAVF_PTT_UNUSED_ENTRY(485),
+	IAVF_PTT_UNUSED_ENTRY(486),
+	IAVF_PTT_UNUSED_ENTRY(487),
+	IAVF_PTT_UNUSED_ENTRY(488),
+	IAVF_PTT_UNUSED_ENTRY(489),
+
+	IAVF_PTT_UNUSED_ENTRY(490),
+	IAVF_PTT_UNUSED_ENTRY(491),
+	IAVF_PTT_UNUSED_ENTRY(492),
+	IAVF_PTT_UNUSED_ENTRY(493),
+	IAVF_PTT_UNUSED_ENTRY(494),
+	IAVF_PTT_UNUSED_ENTRY(495),
+	IAVF_PTT_UNUSED_ENTRY(496),
+	IAVF_PTT_UNUSED_ENTRY(497),
+	IAVF_PTT_UNUSED_ENTRY(498),
+	IAVF_PTT_UNUSED_ENTRY(499),
+
+	IAVF_PTT_UNUSED_ENTRY(500),
+	IAVF_PTT_UNUSED_ENTRY(501),
+	IAVF_PTT_UNUSED_ENTRY(502),
+	IAVF_PTT_UNUSED_ENTRY(503),
+	IAVF_PTT_UNUSED_ENTRY(504),
+	IAVF_PTT_UNUSED_ENTRY(505),
+	IAVF_PTT_UNUSED_ENTRY(506),
+	IAVF_PTT_UNUSED_ENTRY(507),
+	IAVF_PTT_UNUSED_ENTRY(508),
+	IAVF_PTT_UNUSED_ENTRY(509),
+
+	IAVF_PTT_UNUSED_ENTRY(510),
+	IAVF_PTT_UNUSED_ENTRY(511),
+	IAVF_PTT_UNUSED_ENTRY(512),
+	IAVF_PTT_UNUSED_ENTRY(513),
+	IAVF_PTT_UNUSED_ENTRY(514),
+	IAVF_PTT_UNUSED_ENTRY(515),
+	IAVF_PTT_UNUSED_ENTRY(516),
+	IAVF_PTT_UNUSED_ENTRY(517),
+	IAVF_PTT_UNUSED_ENTRY(518),
+	IAVF_PTT_UNUSED_ENTRY(519),
+
+	IAVF_PTT_UNUSED_ENTRY(520),
+	IAVF_PTT_UNUSED_ENTRY(521),
+	IAVF_PTT_UNUSED_ENTRY(522),
+	IAVF_PTT_UNUSED_ENTRY(523),
+	IAVF_PTT_UNUSED_ENTRY(524),
+	IAVF_PTT_UNUSED_ENTRY(525),
+	IAVF_PTT_UNUSED_ENTRY(526),
+	IAVF_PTT_UNUSED_ENTRY(527),
+	IAVF_PTT_UNUSED_ENTRY(528),
+	IAVF_PTT_UNUSED_ENTRY(529),
+
+	IAVF_PTT_UNUSED_ENTRY(530),
+	IAVF_PTT_UNUSED_ENTRY(531),
+	IAVF_PTT_UNUSED_ENTRY(532),
+	IAVF_PTT_UNUSED_ENTRY(533),
+	IAVF_PTT_UNUSED_ENTRY(534),
+	IAVF_PTT_UNUSED_ENTRY(535),
+	IAVF_PTT_UNUSED_ENTRY(536),
+	IAVF_PTT_UNUSED_ENTRY(537),
+	IAVF_PTT_UNUSED_ENTRY(538),
+	IAVF_PTT_UNUSED_ENTRY(539),
+
+	IAVF_PTT_UNUSED_ENTRY(540),
+	IAVF_PTT_UNUSED_ENTRY(541),
+	IAVF_PTT_UNUSED_ENTRY(542),
+	IAVF_PTT_UNUSED_ENTRY(543),
+	IAVF_PTT_UNUSED_ENTRY(544),
+	IAVF_PTT_UNUSED_ENTRY(545),
+	IAVF_PTT_UNUSED_ENTRY(546),
+	IAVF_PTT_UNUSED_ENTRY(547),
+	IAVF_PTT_UNUSED_ENTRY(548),
+	IAVF_PTT_UNUSED_ENTRY(549),
+
+	IAVF_PTT_UNUSED_ENTRY(550),
+	IAVF_PTT_UNUSED_ENTRY(551),
+	IAVF_PTT_UNUSED_ENTRY(552),
+	IAVF_PTT_UNUSED_ENTRY(553),
+	IAVF_PTT_UNUSED_ENTRY(554),
+	IAVF_PTT_UNUSED_ENTRY(555),
+	IAVF_PTT_UNUSED_ENTRY(556),
+	IAVF_PTT_UNUSED_ENTRY(557),
+	IAVF_PTT_UNUSED_ENTRY(558),
+	IAVF_PTT_UNUSED_ENTRY(559),
+
+	IAVF_PTT_UNUSED_ENTRY(560),
+	IAVF_PTT_UNUSED_ENTRY(561),
+	IAVF_PTT_UNUSED_ENTRY(562),
+	IAVF_PTT_UNUSED_ENTRY(563),
+	IAVF_PTT_UNUSED_ENTRY(564),
+	IAVF_PTT_UNUSED_ENTRY(565),
+	IAVF_PTT_UNUSED_ENTRY(566),
+	IAVF_PTT_UNUSED_ENTRY(567),
+	IAVF_PTT_UNUSED_ENTRY(568),
+	IAVF_PTT_UNUSED_ENTRY(569),
+
+	IAVF_PTT_UNUSED_ENTRY(570),
+	IAVF_PTT_UNUSED_ENTRY(571),
+	IAVF_PTT_UNUSED_ENTRY(572),
+	IAVF_PTT_UNUSED_ENTRY(573),
+	IAVF_PTT_UNUSED_ENTRY(574),
+	IAVF_PTT_UNUSED_ENTRY(575),
+	IAVF_PTT_UNUSED_ENTRY(576),
+	IAVF_PTT_UNUSED_ENTRY(577),
+	IAVF_PTT_UNUSED_ENTRY(578),
+	IAVF_PTT_UNUSED_ENTRY(579),
+
+	IAVF_PTT_UNUSED_ENTRY(580),
+	IAVF_PTT_UNUSED_ENTRY(581),
+	IAVF_PTT_UNUSED_ENTRY(582),
+	IAVF_PTT_UNUSED_ENTRY(583),
+	IAVF_PTT_UNUSED_ENTRY(584),
+	IAVF_PTT_UNUSED_ENTRY(585),
+	IAVF_PTT_UNUSED_ENTRY(586),
+	IAVF_PTT_UNUSED_ENTRY(587),
+	IAVF_PTT_UNUSED_ENTRY(588),
+	IAVF_PTT_UNUSED_ENTRY(589),
+
+	IAVF_PTT_UNUSED_ENTRY(590),
+	IAVF_PTT_UNUSED_ENTRY(591),
+	IAVF_PTT_UNUSED_ENTRY(592),
+	IAVF_PTT_UNUSED_ENTRY(593),
+	IAVF_PTT_UNUSED_ENTRY(594),
+	IAVF_PTT_UNUSED_ENTRY(595),
+	IAVF_PTT_UNUSED_ENTRY(596),
+	IAVF_PTT_UNUSED_ENTRY(597),
+	IAVF_PTT_UNUSED_ENTRY(598),
+	IAVF_PTT_UNUSED_ENTRY(599),
+
+	IAVF_PTT_UNUSED_ENTRY(600),
+	IAVF_PTT_UNUSED_ENTRY(601),
+	IAVF_PTT_UNUSED_ENTRY(602),
+	IAVF_PTT_UNUSED_ENTRY(603),
+	IAVF_PTT_UNUSED_ENTRY(604),
+	IAVF_PTT_UNUSED_ENTRY(605),
+	IAVF_PTT_UNUSED_ENTRY(606),
+	IAVF_PTT_UNUSED_ENTRY(607),
+	IAVF_PTT_UNUSED_ENTRY(608),
+	IAVF_PTT_UNUSED_ENTRY(609),
+
+	IAVF_PTT_UNUSED_ENTRY(610),
+	IAVF_PTT_UNUSED_ENTRY(611),
+	IAVF_PTT_UNUSED_ENTRY(612),
+	IAVF_PTT_UNUSED_ENTRY(613),
+	IAVF_PTT_UNUSED_ENTRY(614),
+	IAVF_PTT_UNUSED_ENTRY(615),
+	IAVF_PTT_UNUSED_ENTRY(616),
+	IAVF_PTT_UNUSED_ENTRY(617),
+	IAVF_PTT_UNUSED_ENTRY(618),
+	IAVF_PTT_UNUSED_ENTRY(619),
+
+	IAVF_PTT_UNUSED_ENTRY(620),
+	IAVF_PTT_UNUSED_ENTRY(621),
+	IAVF_PTT_UNUSED_ENTRY(622),
+	IAVF_PTT_UNUSED_ENTRY(623),
+	IAVF_PTT_UNUSED_ENTRY(624),
+	IAVF_PTT_UNUSED_ENTRY(625),
+	IAVF_PTT_UNUSED_ENTRY(626),
+	IAVF_PTT_UNUSED_ENTRY(627),
+	IAVF_PTT_UNUSED_ENTRY(628),
+	IAVF_PTT_UNUSED_ENTRY(629),
+
+	IAVF_PTT_UNUSED_ENTRY(630),
+	IAVF_PTT_UNUSED_ENTRY(631),
+	IAVF_PTT_UNUSED_ENTRY(632),
+	IAVF_PTT_UNUSED_ENTRY(633),
+	IAVF_PTT_UNUSED_ENTRY(634),
+	IAVF_PTT_UNUSED_ENTRY(635),
+	IAVF_PTT_UNUSED_ENTRY(636),
+	IAVF_PTT_UNUSED_ENTRY(637),
+	IAVF_PTT_UNUSED_ENTRY(638),
+	IAVF_PTT_UNUSED_ENTRY(639),
+
+	IAVF_PTT_UNUSED_ENTRY(640),
+	IAVF_PTT_UNUSED_ENTRY(641),
+	IAVF_PTT_UNUSED_ENTRY(642),
+	IAVF_PTT_UNUSED_ENTRY(643),
+	IAVF_PTT_UNUSED_ENTRY(644),
+	IAVF_PTT_UNUSED_ENTRY(645),
+	IAVF_PTT_UNUSED_ENTRY(646),
+	IAVF_PTT_UNUSED_ENTRY(647),
+	IAVF_PTT_UNUSED_ENTRY(648),
+	IAVF_PTT_UNUSED_ENTRY(649),
+
+	IAVF_PTT_UNUSED_ENTRY(650),
+	IAVF_PTT_UNUSED_ENTRY(651),
+	IAVF_PTT_UNUSED_ENTRY(652),
+	IAVF_PTT_UNUSED_ENTRY(653),
+	IAVF_PTT_UNUSED_ENTRY(654),
+	IAVF_PTT_UNUSED_ENTRY(655),
+	IAVF_PTT_UNUSED_ENTRY(656),
+	IAVF_PTT_UNUSED_ENTRY(657),
+	IAVF_PTT_UNUSED_ENTRY(658),
+	IAVF_PTT_UNUSED_ENTRY(659),
+
+	IAVF_PTT_UNUSED_ENTRY(660),
+	IAVF_PTT_UNUSED_ENTRY(661),
+	IAVF_PTT_UNUSED_ENTRY(662),
+	IAVF_PTT_UNUSED_ENTRY(663),
+	IAVF_PTT_UNUSED_ENTRY(664),
+	IAVF_PTT_UNUSED_ENTRY(665),
+	IAVF_PTT_UNUSED_ENTRY(666),
+	IAVF_PTT_UNUSED_ENTRY(667),
+	IAVF_PTT_UNUSED_ENTRY(668),
+	IAVF_PTT_UNUSED_ENTRY(669),
+
+	IAVF_PTT_UNUSED_ENTRY(670),
+	IAVF_PTT_UNUSED_ENTRY(671),
+	IAVF_PTT_UNUSED_ENTRY(672),
+	IAVF_PTT_UNUSED_ENTRY(673),
+	IAVF_PTT_UNUSED_ENTRY(674),
+	IAVF_PTT_UNUSED_ENTRY(675),
+	IAVF_PTT_UNUSED_ENTRY(676),
+	IAVF_PTT_UNUSED_ENTRY(677),
+	IAVF_PTT_UNUSED_ENTRY(678),
+	IAVF_PTT_UNUSED_ENTRY(679),
+
+	IAVF_PTT_UNUSED_ENTRY(680),
+	IAVF_PTT_UNUSED_ENTRY(681),
+	IAVF_PTT_UNUSED_ENTRY(682),
+	IAVF_PTT_UNUSED_ENTRY(683),
+	IAVF_PTT_UNUSED_ENTRY(684),
+	IAVF_PTT_UNUSED_ENTRY(685),
+	IAVF_PTT_UNUSED_ENTRY(686),
+	IAVF_PTT_UNUSED_ENTRY(687),
+	IAVF_PTT_UNUSED_ENTRY(688),
+	IAVF_PTT_UNUSED_ENTRY(689),
+
+	IAVF_PTT_UNUSED_ENTRY(690),
+	IAVF_PTT_UNUSED_ENTRY(691),
+	IAVF_PTT_UNUSED_ENTRY(692),
+	IAVF_PTT_UNUSED_ENTRY(693),
+	IAVF_PTT_UNUSED_ENTRY(694),
+	IAVF_PTT_UNUSED_ENTRY(695),
+	IAVF_PTT_UNUSED_ENTRY(696),
+	IAVF_PTT_UNUSED_ENTRY(697),
+	IAVF_PTT_UNUSED_ENTRY(698),
+	IAVF_PTT_UNUSED_ENTRY(699),
+
+	IAVF_PTT_UNUSED_ENTRY(700),
+	IAVF_PTT_UNUSED_ENTRY(701),
+	IAVF_PTT_UNUSED_ENTRY(702),
+	IAVF_PTT_UNUSED_ENTRY(703),
+	IAVF_PTT_UNUSED_ENTRY(704),
+	IAVF_PTT_UNUSED_ENTRY(705),
+	IAVF_PTT_UNUSED_ENTRY(706),
+	IAVF_PTT_UNUSED_ENTRY(707),
+	IAVF_PTT_UNUSED_ENTRY(708),
+	IAVF_PTT_UNUSED_ENTRY(709),
+
+	IAVF_PTT_UNUSED_ENTRY(710),
+	IAVF_PTT_UNUSED_ENTRY(711),
+	IAVF_PTT_UNUSED_ENTRY(712),
+	IAVF_PTT_UNUSED_ENTRY(713),
+	IAVF_PTT_UNUSED_ENTRY(714),
+	IAVF_PTT_UNUSED_ENTRY(715),
+	IAVF_PTT_UNUSED_ENTRY(716),
+	IAVF_PTT_UNUSED_ENTRY(717),
+	IAVF_PTT_UNUSED_ENTRY(718),
+	IAVF_PTT_UNUSED_ENTRY(719),
+
+	IAVF_PTT_UNUSED_ENTRY(720),
+	IAVF_PTT_UNUSED_ENTRY(721),
+	IAVF_PTT_UNUSED_ENTRY(722),
+	IAVF_PTT_UNUSED_ENTRY(723),
+	IAVF_PTT_UNUSED_ENTRY(724),
+	IAVF_PTT_UNUSED_ENTRY(725),
+	IAVF_PTT_UNUSED_ENTRY(726),
+	IAVF_PTT_UNUSED_ENTRY(727),
+	IAVF_PTT_UNUSED_ENTRY(728),
+	IAVF_PTT_UNUSED_ENTRY(729),
+
+	IAVF_PTT_UNUSED_ENTRY(730),
+	IAVF_PTT_UNUSED_ENTRY(731),
+	IAVF_PTT_UNUSED_ENTRY(732),
+	IAVF_PTT_UNUSED_ENTRY(733),
+	IAVF_PTT_UNUSED_ENTRY(734),
+	IAVF_PTT_UNUSED_ENTRY(735),
+	IAVF_PTT_UNUSED_ENTRY(736),
+	IAVF_PTT_UNUSED_ENTRY(737),
+	IAVF_PTT_UNUSED_ENTRY(738),
+	IAVF_PTT_UNUSED_ENTRY(739),
+
+	IAVF_PTT_UNUSED_ENTRY(740),
+	IAVF_PTT_UNUSED_ENTRY(741),
+	IAVF_PTT_UNUSED_ENTRY(742),
+	IAVF_PTT_UNUSED_ENTRY(743),
+	IAVF_PTT_UNUSED_ENTRY(744),
+	IAVF_PTT_UNUSED_ENTRY(745),
+	IAVF_PTT_UNUSED_ENTRY(746),
+	IAVF_PTT_UNUSED_ENTRY(747),
+	IAVF_PTT_UNUSED_ENTRY(748),
+	IAVF_PTT_UNUSED_ENTRY(749),
+
+	IAVF_PTT_UNUSED_ENTRY(750),
+	IAVF_PTT_UNUSED_ENTRY(751),
+	IAVF_PTT_UNUSED_ENTRY(752),
+	IAVF_PTT_UNUSED_ENTRY(753),
+	IAVF_PTT_UNUSED_ENTRY(754),
+	IAVF_PTT_UNUSED_ENTRY(755),
+	IAVF_PTT_UNUSED_ENTRY(756),
+	IAVF_PTT_UNUSED_ENTRY(757),
+	IAVF_PTT_UNUSED_ENTRY(758),
+	IAVF_PTT_UNUSED_ENTRY(759),
+
+	IAVF_PTT_UNUSED_ENTRY(760),
+	IAVF_PTT_UNUSED_ENTRY(761),
+	IAVF_PTT_UNUSED_ENTRY(762),
+	IAVF_PTT_UNUSED_ENTRY(763),
+	IAVF_PTT_UNUSED_ENTRY(764),
+	IAVF_PTT_UNUSED_ENTRY(765),
+	IAVF_PTT_UNUSED_ENTRY(766),
+	IAVF_PTT_UNUSED_ENTRY(767),
+	IAVF_PTT_UNUSED_ENTRY(768),
+	IAVF_PTT_UNUSED_ENTRY(769),
+
+	IAVF_PTT_UNUSED_ENTRY(770),
+	IAVF_PTT_UNUSED_ENTRY(771),
+	IAVF_PTT_UNUSED_ENTRY(772),
+	IAVF_PTT_UNUSED_ENTRY(773),
+	IAVF_PTT_UNUSED_ENTRY(774),
+	IAVF_PTT_UNUSED_ENTRY(775),
+	IAVF_PTT_UNUSED_ENTRY(776),
+	IAVF_PTT_UNUSED_ENTRY(777),
+	IAVF_PTT_UNUSED_ENTRY(778),
+	IAVF_PTT_UNUSED_ENTRY(779),
+
+	IAVF_PTT_UNUSED_ENTRY(780),
+	IAVF_PTT_UNUSED_ENTRY(781),
+	IAVF_PTT_UNUSED_ENTRY(782),
+	IAVF_PTT_UNUSED_ENTRY(783),
+	IAVF_PTT_UNUSED_ENTRY(784),
+	IAVF_PTT_UNUSED_ENTRY(785),
+	IAVF_PTT_UNUSED_ENTRY(786),
+	IAVF_PTT_UNUSED_ENTRY(787),
+	IAVF_PTT_UNUSED_ENTRY(788),
+	IAVF_PTT_UNUSED_ENTRY(789),
+
+	IAVF_PTT_UNUSED_ENTRY(790),
+	IAVF_PTT_UNUSED_ENTRY(791),
+	IAVF_PTT_UNUSED_ENTRY(792),
+	IAVF_PTT_UNUSED_ENTRY(793),
+	IAVF_PTT_UNUSED_ENTRY(794),
+	IAVF_PTT_UNUSED_ENTRY(795),
+	IAVF_PTT_UNUSED_ENTRY(796),
+	IAVF_PTT_UNUSED_ENTRY(797),
+	IAVF_PTT_UNUSED_ENTRY(798),
+	IAVF_PTT_UNUSED_ENTRY(799),
+
+	IAVF_PTT_UNUSED_ENTRY(800),
+	IAVF_PTT_UNUSED_ENTRY(801),
+	IAVF_PTT_UNUSED_ENTRY(802),
+	IAVF_PTT_UNUSED_ENTRY(803),
+	IAVF_PTT_UNUSED_ENTRY(804),
+	IAVF_PTT_UNUSED_ENTRY(805),
+	IAVF_PTT_UNUSED_ENTRY(806),
+	IAVF_PTT_UNUSED_ENTRY(807),
+	IAVF_PTT_UNUSED_ENTRY(808),
+	IAVF_PTT_UNUSED_ENTRY(809),
+
+	IAVF_PTT_UNUSED_ENTRY(810),
+	IAVF_PTT_UNUSED_ENTRY(811),
+	IAVF_PTT_UNUSED_ENTRY(812),
+	IAVF_PTT_UNUSED_ENTRY(813),
+	IAVF_PTT_UNUSED_ENTRY(814),
+	IAVF_PTT_UNUSED_ENTRY(815),
+	IAVF_PTT_UNUSED_ENTRY(816),
+	IAVF_PTT_UNUSED_ENTRY(817),
+	IAVF_PTT_UNUSED_ENTRY(818),
+	IAVF_PTT_UNUSED_ENTRY(819),
+
+	IAVF_PTT_UNUSED_ENTRY(820),
+	IAVF_PTT_UNUSED_ENTRY(821),
+	IAVF_PTT_UNUSED_ENTRY(822),
+	IAVF_PTT_UNUSED_ENTRY(823),
+	IAVF_PTT_UNUSED_ENTRY(824),
+	IAVF_PTT_UNUSED_ENTRY(825),
+	IAVF_PTT_UNUSED_ENTRY(826),
+	IAVF_PTT_UNUSED_ENTRY(827),
+	IAVF_PTT_UNUSED_ENTRY(828),
+	IAVF_PTT_UNUSED_ENTRY(829),
+
+	IAVF_PTT_UNUSED_ENTRY(830),
+	IAVF_PTT_UNUSED_ENTRY(831),
+	IAVF_PTT_UNUSED_ENTRY(832),
+	IAVF_PTT_UNUSED_ENTRY(833),
+	IAVF_PTT_UNUSED_ENTRY(834),
+	IAVF_PTT_UNUSED_ENTRY(835),
+	IAVF_PTT_UNUSED_ENTRY(836),
+	IAVF_PTT_UNUSED_ENTRY(837),
+	IAVF_PTT_UNUSED_ENTRY(838),
+	IAVF_PTT_UNUSED_ENTRY(839),
+
+	IAVF_PTT_UNUSED_ENTRY(840),
+	IAVF_PTT_UNUSED_ENTRY(841),
+	IAVF_PTT_UNUSED_ENTRY(842),
+	IAVF_PTT_UNUSED_ENTRY(843),
+	IAVF_PTT_UNUSED_ENTRY(844),
+	IAVF_PTT_UNUSED_ENTRY(845),
+	IAVF_PTT_UNUSED_ENTRY(846),
+	IAVF_PTT_UNUSED_ENTRY(847),
+	IAVF_PTT_UNUSED_ENTRY(848),
+	IAVF_PTT_UNUSED_ENTRY(849),
+
+	IAVF_PTT_UNUSED_ENTRY(850),
+	IAVF_PTT_UNUSED_ENTRY(851),
+	IAVF_PTT_UNUSED_ENTRY(852),
+	IAVF_PTT_UNUSED_ENTRY(853),
+	IAVF_PTT_UNUSED_ENTRY(854),
+	IAVF_PTT_UNUSED_ENTRY(855),
+	IAVF_PTT_UNUSED_ENTRY(856),
+	IAVF_PTT_UNUSED_ENTRY(857),
+	IAVF_PTT_UNUSED_ENTRY(858),
+	IAVF_PTT_UNUSED_ENTRY(859),
+
+	IAVF_PTT_UNUSED_ENTRY(860),
+	IAVF_PTT_UNUSED_ENTRY(861),
+	IAVF_PTT_UNUSED_ENTRY(862),
+	IAVF_PTT_UNUSED_ENTRY(863),
+	IAVF_PTT_UNUSED_ENTRY(864),
+	IAVF_PTT_UNUSED_ENTRY(865),
+	IAVF_PTT_UNUSED_ENTRY(866),
+	IAVF_PTT_UNUSED_ENTRY(867),
+	IAVF_PTT_UNUSED_ENTRY(868),
+	IAVF_PTT_UNUSED_ENTRY(869),
+
+	IAVF_PTT_UNUSED_ENTRY(870),
+	IAVF_PTT_UNUSED_ENTRY(871),
+	IAVF_PTT_UNUSED_ENTRY(872),
+	IAVF_PTT_UNUSED_ENTRY(873),
+	IAVF_PTT_UNUSED_ENTRY(874),
+	IAVF_PTT_UNUSED_ENTRY(875),
+	IAVF_PTT_UNUSED_ENTRY(876),
+	IAVF_PTT_UNUSED_ENTRY(877),
+	IAVF_PTT_UNUSED_ENTRY(878),
+	IAVF_PTT_UNUSED_ENTRY(879),
+
+	IAVF_PTT_UNUSED_ENTRY(880),
+	IAVF_PTT_UNUSED_ENTRY(881),
+	IAVF_PTT_UNUSED_ENTRY(882),
+	IAVF_PTT_UNUSED_ENTRY(883),
+	IAVF_PTT_UNUSED_ENTRY(884),
+	IAVF_PTT_UNUSED_ENTRY(885),
+	IAVF_PTT_UNUSED_ENTRY(886),
+	IAVF_PTT_UNUSED_ENTRY(887),
+	IAVF_PTT_UNUSED_ENTRY(888),
+	IAVF_PTT_UNUSED_ENTRY(889),
+
+	IAVF_PTT_UNUSED_ENTRY(890),
+	IAVF_PTT_UNUSED_ENTRY(891),
+	IAVF_PTT_UNUSED_ENTRY(892),
+	IAVF_PTT_UNUSED_ENTRY(893),
+	IAVF_PTT_UNUSED_ENTRY(894),
+	IAVF_PTT_UNUSED_ENTRY(895),
+	IAVF_PTT_UNUSED_ENTRY(896),
+	IAVF_PTT_UNUSED_ENTRY(897),
+	IAVF_PTT_UNUSED_ENTRY(898),
+	IAVF_PTT_UNUSED_ENTRY(899),
+
+	IAVF_PTT_UNUSED_ENTRY(900),
+	IAVF_PTT_UNUSED_ENTRY(901),
+	IAVF_PTT_UNUSED_ENTRY(902),
+	IAVF_PTT_UNUSED_ENTRY(903),
+	IAVF_PTT_UNUSED_ENTRY(904),
+	IAVF_PTT_UNUSED_ENTRY(905),
+	IAVF_PTT_UNUSED_ENTRY(906),
+	IAVF_PTT_UNUSED_ENTRY(907),
+	IAVF_PTT_UNUSED_ENTRY(908),
+	IAVF_PTT_UNUSED_ENTRY(909),
+
+	IAVF_PTT_UNUSED_ENTRY(910),
+	IAVF_PTT_UNUSED_ENTRY(911),
+	IAVF_PTT_UNUSED_ENTRY(912),
+	IAVF_PTT_UNUSED_ENTRY(913),
+	IAVF_PTT_UNUSED_ENTRY(914),
+	IAVF_PTT_UNUSED_ENTRY(915),
+	IAVF_PTT_UNUSED_ENTRY(916),
+	IAVF_PTT_UNUSED_ENTRY(917),
+	IAVF_PTT_UNUSED_ENTRY(918),
+	IAVF_PTT_UNUSED_ENTRY(919),
+
+	IAVF_PTT_UNUSED_ENTRY(920),
+	IAVF_PTT_UNUSED_ENTRY(921),
+	IAVF_PTT_UNUSED_ENTRY(922),
+	IAVF_PTT_UNUSED_ENTRY(923),
+	IAVF_PTT_UNUSED_ENTRY(924),
+	IAVF_PTT_UNUSED_ENTRY(925),
+	IAVF_PTT_UNUSED_ENTRY(926),
+	IAVF_PTT_UNUSED_ENTRY(927),
+	IAVF_PTT_UNUSED_ENTRY(928),
+	IAVF_PTT_UNUSED_ENTRY(929),
+
+	IAVF_PTT_UNUSED_ENTRY(930),
+	IAVF_PTT_UNUSED_ENTRY(931),
+	IAVF_PTT_UNUSED_ENTRY(932),
+	IAVF_PTT_UNUSED_ENTRY(933),
+	IAVF_PTT_UNUSED_ENTRY(934),
+	IAVF_PTT_UNUSED_ENTRY(935),
+	IAVF_PTT_UNUSED_ENTRY(936),
+	IAVF_PTT_UNUSED_ENTRY(937),
+	IAVF_PTT_UNUSED_ENTRY(938),
+	IAVF_PTT_UNUSED_ENTRY(939),
+
+	IAVF_PTT_UNUSED_ENTRY(940),
+	IAVF_PTT_UNUSED_ENTRY(941),
+	IAVF_PTT_UNUSED_ENTRY(942),
+	IAVF_PTT_UNUSED_ENTRY(943),
+	IAVF_PTT_UNUSED_ENTRY(944),
+	IAVF_PTT_UNUSED_ENTRY(945),
+	IAVF_PTT_UNUSED_ENTRY(946),
+	IAVF_PTT_UNUSED_ENTRY(947),
+	IAVF_PTT_UNUSED_ENTRY(948),
+	IAVF_PTT_UNUSED_ENTRY(949),
+
+	IAVF_PTT_UNUSED_ENTRY(950),
+	IAVF_PTT_UNUSED_ENTRY(951),
+	IAVF_PTT_UNUSED_ENTRY(952),
+	IAVF_PTT_UNUSED_ENTRY(953),
+	IAVF_PTT_UNUSED_ENTRY(954),
+	IAVF_PTT_UNUSED_ENTRY(955),
+	IAVF_PTT_UNUSED_ENTRY(956),
+	IAVF_PTT_UNUSED_ENTRY(957),
+	IAVF_PTT_UNUSED_ENTRY(958),
+	IAVF_PTT_UNUSED_ENTRY(959),
+
+	IAVF_PTT_UNUSED_ENTRY(960),
+	IAVF_PTT_UNUSED_ENTRY(961),
+	IAVF_PTT_UNUSED_ENTRY(962),
+	IAVF_PTT_UNUSED_ENTRY(963),
+	IAVF_PTT_UNUSED_ENTRY(964),
+	IAVF_PTT_UNUSED_ENTRY(965),
+	IAVF_PTT_UNUSED_ENTRY(966),
+	IAVF_PTT_UNUSED_ENTRY(967),
+	IAVF_PTT_UNUSED_ENTRY(968),
+	IAVF_PTT_UNUSED_ENTRY(969),
+
+	IAVF_PTT_UNUSED_ENTRY(970),
+	IAVF_PTT_UNUSED_ENTRY(971),
+	IAVF_PTT_UNUSED_ENTRY(972),
+	IAVF_PTT_UNUSED_ENTRY(973),
+	IAVF_PTT_UNUSED_ENTRY(974),
+	IAVF_PTT_UNUSED_ENTRY(975),
+	IAVF_PTT_UNUSED_ENTRY(976),
+	IAVF_PTT_UNUSED_ENTRY(977),
+	IAVF_PTT_UNUSED_ENTRY(978),
+	IAVF_PTT_UNUSED_ENTRY(979),
+
+	IAVF_PTT_UNUSED_ENTRY(980),
+	IAVF_PTT_UNUSED_ENTRY(981),
+	IAVF_PTT_UNUSED_ENTRY(982),
+	IAVF_PTT_UNUSED_ENTRY(983),
+	IAVF_PTT_UNUSED_ENTRY(984),
+	IAVF_PTT_UNUSED_ENTRY(985),
+	IAVF_PTT_UNUSED_ENTRY(986),
+	IAVF_PTT_UNUSED_ENTRY(987),
+	IAVF_PTT_UNUSED_ENTRY(988),
+	IAVF_PTT_UNUSED_ENTRY(989),
+
+	IAVF_PTT_UNUSED_ENTRY(990),
+	IAVF_PTT_UNUSED_ENTRY(991),
+	IAVF_PTT_UNUSED_ENTRY(992),
+	IAVF_PTT_UNUSED_ENTRY(993),
+	IAVF_PTT_UNUSED_ENTRY(994),
+	IAVF_PTT_UNUSED_ENTRY(995),
+	IAVF_PTT_UNUSED_ENTRY(996),
+	IAVF_PTT_UNUSED_ENTRY(997),
+	IAVF_PTT_UNUSED_ENTRY(998),
+	IAVF_PTT_UNUSED_ENTRY(999),
+
+	IAVF_PTT_UNUSED_ENTRY(1000),
+	IAVF_PTT_UNUSED_ENTRY(1001),
+	IAVF_PTT_UNUSED_ENTRY(1002),
+	IAVF_PTT_UNUSED_ENTRY(1003),
+	IAVF_PTT_UNUSED_ENTRY(1004),
+	IAVF_PTT_UNUSED_ENTRY(1005),
+	IAVF_PTT_UNUSED_ENTRY(1006),
+	IAVF_PTT_UNUSED_ENTRY(1007),
+	IAVF_PTT_UNUSED_ENTRY(1008),
+	IAVF_PTT_UNUSED_ENTRY(1009),
+
+	IAVF_PTT_UNUSED_ENTRY(1010),
+	IAVF_PTT_UNUSED_ENTRY(1011),
+	IAVF_PTT_UNUSED_ENTRY(1012),
+	IAVF_PTT_UNUSED_ENTRY(1013),
+	IAVF_PTT_UNUSED_ENTRY(1014),
+	IAVF_PTT_UNUSED_ENTRY(1015),
+	IAVF_PTT_UNUSED_ENTRY(1016),
+	IAVF_PTT_UNUSED_ENTRY(1017),
+	IAVF_PTT_UNUSED_ENTRY(1018),
+	IAVF_PTT_UNUSED_ENTRY(1019),
+
+	IAVF_PTT_UNUSED_ENTRY(1020),
+	IAVF_PTT_UNUSED_ENTRY(1021),
+	IAVF_PTT_UNUSED_ENTRY(1022),
+	IAVF_PTT_UNUSED_ENTRY(1023),
 };
 
 /**
@@ -924,7 +1769,7 @@ enum iavf_status iavf_validate_mac_addr(u8 *mac_addr)
  **/
 enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
 				enum virtchnl_ops v_opcode,
-				enum iavf_status v_retval,
+				enum virtchnl_status_code v_retval,
 				u8 *msg, u16 msglen,
 				struct iavf_asq_cmd_details *cmd_details)
 {
@@ -1001,7 +1846,7 @@ void iavf_vf_parse_hw_config(struct iavf_hw *hw,
 enum iavf_status iavf_vf_reset(struct iavf_hw *hw)
 {
 	return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
-				      IAVF_SUCCESS, NULL, 0, NULL);
+				      VIRTCHNL_STATUS_SUCCESS, NULL, 0, NULL);
 }
 
 /**
diff --git a/drivers/common/iavf/iavf_devids.h b/drivers/common/iavf/iavf_devids.h
index 2e63aac289..3e09e5feb8 100644
--- a/drivers/common/iavf/iavf_devids.h
+++ b/drivers/common/iavf/iavf_devids.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_DEVIDS_H_
@@ -13,5 +13,6 @@
 #define IAVF_DEV_ID_VF_HV		0x1571
 #define IAVF_DEV_ID_ADAPTIVE_VF		0x1889
 #define IAVF_DEV_ID_X722_VF		0x37CD
+#define IAVF_DEV_ID_X722_A0_VF          0x374D
 
 #endif /* _IAVF_DEVIDS_H_ */
diff --git a/drivers/common/iavf/iavf_impl.c b/drivers/common/iavf/iavf_impl.c
index f80878b9fd..4284585f5d 100644
--- a/drivers/common/iavf/iavf_impl.c
+++ b/drivers/common/iavf/iavf_impl.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2020 Intel Corporation
+ * Copyright(c) 2019-2021 Intel Corporation
  */
 
 #include <stdio.h>
diff --git a/drivers/common/iavf/iavf_osdep.h b/drivers/common/iavf/iavf_osdep.h
index 7cba13ff74..0ea7ea0efe 100644
--- a/drivers/common/iavf/iavf_osdep.h
+++ b/drivers/common/iavf/iavf_osdep.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2020 Intel Corporation
+ * Copyright(c) 2017-2021 Intel Corporation
  */
 
 #ifndef _IAVF_OSDEP_H_
@@ -123,6 +123,11 @@ writeq(uint64_t value, volatile void *addr)
 #define IAVF_PCI_REG_WRITE(reg, value)         writel(value, reg)
 #define IAVF_PCI_REG_WRITE_RELAXED(reg, value) writel_relaxed(value, reg)
 
+#define IAVF_PCI_REG_WC_WRITE(reg, value) \
+	rte_write32_wc((rte_cpu_to_le_32(value)), reg)
+#define IAVF_PCI_REG_WC_WRITE_RELAXED(reg, value) \
+	rte_write32_wc_relaxed((rte_cpu_to_le_32(value)), reg)
+
 #define IAVF_READ_REG(hw, reg)                 rd32(hw, reg)
 #define IAVF_WRITE_REG(hw, reg, value)         wr32(hw, reg, value)
 
diff --git a/drivers/common/iavf/iavf_prototype.h b/drivers/common/iavf/iavf_prototype.h
index f34e77db0f..16cb973bb8 100644
--- a/drivers/common/iavf/iavf_prototype.h
+++ b/drivers/common/iavf/iavf_prototype.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_PROTOTYPE_H_
@@ -69,7 +69,7 @@ enum iavf_status iavf_set_mac_type(struct iavf_hw *hw);
 
 extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
 
-STATIC INLINE struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+STATIC INLINE struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u16 ptype)
 {
 	return iavf_ptype_lookup[ptype];
 }
@@ -87,7 +87,7 @@ enum iavf_status iavf_vf_reset(struct iavf_hw *hw);
 __rte_internal
 enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
 				enum virtchnl_ops v_opcode,
-				enum iavf_status v_retval,
+				enum virtchnl_status_code v_retval,
 				u8 *msg, u16 msglen,
 				struct iavf_asq_cmd_details *cmd_details);
 enum iavf_status iavf_aq_debug_dump(struct iavf_hw *hw, u8 cluster_id,
diff --git a/drivers/common/iavf/iavf_register.h b/drivers/common/iavf/iavf_register.h
index 03d62a9da7..328100138d 100644
--- a/drivers/common/iavf/iavf_register.h
+++ b/drivers/common/iavf/iavf_register.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_REGISTER_H_
diff --git a/drivers/common/iavf/iavf_status.h b/drivers/common/iavf/iavf_status.h
index f425638063..4dd0f5c5d8 100644
--- a/drivers/common/iavf/iavf_status.h
+++ b/drivers/common/iavf/iavf_status.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_STATUS_H_
@@ -19,7 +19,7 @@ enum iavf_status {
 	IAVF_ERR_ADAPTER_STOPPED		= -9,
 	IAVF_ERR_INVALID_MAC_ADDR		= -10,
 	IAVF_ERR_DEVICE_NOT_SUPPORTED		= -11,
-	IAVF_ERR_MASTER_REQUESTS_PENDING	= -12,
+	IAVF_ERR_PRIMARY_REQUESTS_PENDING	= -12,
 	IAVF_ERR_INVALID_LINK_SETTINGS		= -13,
 	IAVF_ERR_AUTONEG_NOT_COMPLETE		= -14,
 	IAVF_ERR_RESET_FAILED			= -15,
diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 0990c9aa33..49d262d795 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_TYPE_H_
@@ -141,6 +141,8 @@ enum iavf_debug_mask {
 #define IAVF_PHY_LED_MODE_MASK			0xFFFF
 #define IAVF_PHY_LED_MODE_ORIG			0x80000000
 
+#define IAVF_MAX_TRAFFIC_CLASS	8
+
 /* Memory types */
 enum iavf_memset_type {
 	IAVF_NONDMA_MEM = 0,
@@ -395,6 +397,45 @@ union iavf_16byte_rx_desc {
 	} wb;  /* writeback */
 };
 
+/* Rx Flex Descriptor NIC Profile
+ * RxDID Profile ID 2
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID higher 16-bits
+ * Flex-field 4: reserved, VLAN ID taken from L2Tag
+ */
+struct iavf_32byte_rx_flex_wb {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	union {
+		struct {
+			__le16 rsvd;
+			__le16 flow_id_ipv6;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
 union iavf_32byte_rx_desc {
 	struct {
 		__le64  pkt_addr; /* Packet buffer address */
@@ -442,6 +483,7 @@ union iavf_32byte_rx_desc {
 			} hi_dword;
 		} qword3;
 	} wb;  /* writeback */
+	struct iavf_32byte_rx_flex_wb flex_wb;
 };
 
 #define IAVF_RXD_QW0_MIRROR_STATUS_SHIFT	8
@@ -526,6 +568,51 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
 #define IAVF_RXD_QW1_PTYPE_SHIFT	30
 #define IAVF_RXD_QW1_PTYPE_MASK		(0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
 
+/* for iavf_32byte_rx_flex_wb.ptype_flexi_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
+
+/* for iavf_32byte_rx_flex_wb.pkt_length member */
+#define IAVF_RX_FLEX_DESC_PKT_LEN_M	(0x3FFF) /* 14-bits */
+
+enum iavf_rx_flex_desc_status_error_0_bits {
+	/* Note: These are predefined bit offsets */
+	IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,
+	IAVF_RX_FLEX_DESC_STATUS0_EOF_S,
+	IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
+	IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
+	IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
+	IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
+	IAVF_RX_FLEX_DESC_STATUS0_RXE_S,
+	IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
+	IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
+	IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
+	IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
+};
+
+enum iavf_rx_flex_desc_status_error_1_bits {
+	/* Note: These are predefined bit offsets */
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
+	/* [10:6] reserved */
+	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
+	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
+	IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
+	IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
+	IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
+	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
+};
+
 /* Packet type non-ip values */
 enum iavf_rx_l2_ptype {
 	IAVF_RX_PTYPE_L2_RESERVED			= 0,
@@ -558,7 +645,7 @@ enum iavf_rx_l2_ptype {
 };
 
 struct iavf_rx_ptype_decoded {
-	u32 ptype:8;
+	u32 ptype:10;
 	u32 known:1;
 	u32 outer_ip:1;
 	u32 outer_ip_ver:1;
@@ -721,6 +808,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
diff --git a/drivers/common/iavf/meson.build b/drivers/common/iavf/meson.build
index 1f4d8b898d..977652223b 100644
--- a/drivers/common/iavf/meson.build
+++ b/drivers/common/iavf/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2019-2020 Intel Corporation
+# Copyright(c) 2019-2021 Intel Corporation
 
 sources = files('iavf_adminq.c', 'iavf_common.c', 'iavf_impl.c')
 
diff --git a/drivers/common/iavf/siov_regs.h b/drivers/common/iavf/siov_regs.h
new file mode 100644
index 0000000000..d921269a5e
--- /dev/null
+++ b/drivers/common/iavf/siov_regs.h
@@ -0,0 +1,47 @@
+#ifndef _SIOV_REGS_H_
+#define _SIOV_REGS_H_
+#define VDEV_MBX_START			0x20000 /* Begin at 128KB */
+#define VDEV_MBX_ATQBAL			(VDEV_MBX_START + 0x0000)
+#define VDEV_MBX_ATQBAH			(VDEV_MBX_START + 0x0004)
+#define VDEV_MBX_ATQLEN			(VDEV_MBX_START + 0x0008)
+#define VDEV_MBX_ATQH			(VDEV_MBX_START + 0x000C)
+#define VDEV_MBX_ATQT			(VDEV_MBX_START + 0x0010)
+#define VDEV_MBX_ARQBAL			(VDEV_MBX_START + 0x0014)
+#define VDEV_MBX_ARQBAH			(VDEV_MBX_START + 0x0018)
+#define VDEV_MBX_ARQLEN			(VDEV_MBX_START + 0x001C)
+#define VDEV_MBX_ARQH			(VDEV_MBX_START + 0x0020)
+#define VDEV_MBX_ARQT			(VDEV_MBX_START + 0x0024)
+#define VDEV_GET_RSTAT			0x21000 /* 132KB for RSTAT */
+
+/* Begin at offset after 1MB (after 256 4k pages) */
+#define VDEV_QRX_TAIL_START       0x100000
+ /* 2k Rx queues */
+#define VDEV_QRX_TAIL(_i)         (VDEV_QRX_TAIL_START + ((_i) * 0x1000))
+
+/* Begin at offset of 9MB for Rx buffer queue tail register pages */
+#define VDEV_QRX_BUFQ_TAIL_START  0x900000
+/* 2k Rx buffer queues */
+#define VDEV_QRX_BUFQ_TAIL(_i)    (VDEV_QRX_BUFQ_TAIL_START + ((_i) * 0x1000))
+
+/* Begin at offset of 17MB for 2k Tx queues */
+#define VDEV_QTX_TAIL_START       0x1100000
+/* 2k Tx queues */
+#define VDEV_QTX_TAIL(_i)         (VDEV_QTX_TAIL_START + ((_i) * 0x1000))
+
+/* Begin at offset of 25MB for 2k Tx completion queues */
+#define VDEV_QTX_COMPL_TAIL_START 0x1900000
+/* 2k Tx completion queues */
+#define VDEV_QTX_COMPL_TAIL(_i)   (VDEV_QTX_COMPL_TAIL_START + ((_i) * 0x1000))
+
+#define VDEV_INT_DYN_CTL01        0x2100000 /* Begin at offset 33MB */
+
+/* Begin at offset of 33MB + 4k to accommodate CTL01 register */
+#define VDEV_INT_DYN_START   (VDEV_INT_DYN_CTL01 + 0x1000)
+#define VDEV_INT_DYN_CTL(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000))
+#define VDEV_INT_ITR_0(_i)   (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x04)
+#define VDEV_INT_ITR_1(_i)   (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x08)
+#define VDEV_INT_ITR_2(_i)   (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x0C)
+
+/* Next offset to begin at 42MB (0x2A00000) */
+#endif /* _SIOV_REGS_H_ */
+
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 4c34d35ba7..d015a785b9 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -1,13 +1,14 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _VIRTCHNL_H_
 #define _VIRTCHNL_H_
 
 /* Description:
- * This header file describes the VF-PF communication protocol used
- * by the drivers for all devices starting from our 40G product line
+ * This header file describes the Virtual Function (VF) - Physical Function
+ * (PF) communication protocol used by the drivers for all devices starting
+ * from our 40G product line
  *
  * Admin queue buffer usage:
  * desc->opcode is always aqc_opc_send_msg_to_pf
@@ -21,8 +22,8 @@
  * have a maximum of sixteen queues for all of its VSIs.
  *
  * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value
- * is of status_code type, defined in the shared type.h.
+ * except RESET_VF, which does not require any response. The returned value
+ * is of virtchnl_status_code type, defined here.
  *
  * In general, VF driver initialization should roughly follow the order of
  * these opcodes. The VF driver must first validate the API version of the
@@ -37,7 +38,13 @@
  * value in current and future projects
  */
 
-/* Error Codes */
+#include "virtchnl_inline_ipsec.h"
+
+/* Error Codes
+ * Note that many older versions of various iAVF drivers convert the reported
+ * status code directly into an iavf_status enumeration. For this reason, it
+ * is important that the values of these enumerations line up.
+ */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
 	VIRTCHNL_STATUS_ERR_PARAM			= -5,
@@ -84,6 +91,10 @@ enum virtchnl_rx_hsplit {
 	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP    = 8,
 };
 
+enum virtchnl_bw_limit_type {
+	VIRTCHNL_BW_SHAPER = 0,
+};
+
 #define VIRTCHNL_ETH_LENGTH_OF_ADDRESS	6
 /* END GENERIC DEFINES */
 
@@ -128,7 +139,10 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, 37 and 38 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
+	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
+	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
 	VIRTCHNL_OP_DCF_CMD_BUFF = 40,
 	VIRTCHNL_OP_DCF_DISABLE = 41,
@@ -139,14 +153,162 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DEL_RSS_CFG = 46,
 	VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
 	VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
-	VIRTCHNL_OP_QUERY_FDIR_FILTER = 49,
 	VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50,
+	VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
+	VIRTCHNL_OP_ADD_VLAN_V2 = 52,
+	VIRTCHNL_OP_DEL_VLAN_V2 = 53,
+	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
+	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
+	VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
+	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
+	VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
+	VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
+	VIRTCHNL_OP_GET_QOS_CAPS = 66,
+	VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
 	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
 	VIRTCHNL_OP_MAX,
 };
 
+static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
+{
+	switch (v_opcode) {
+	case VIRTCHNL_OP_UNKNOWN:
+		return "VIRTCHNL_OP_UNKNOWN";
+	case VIRTCHNL_OP_VERSION:
+		return "VIRTCHNL_OP_VERSION";
+	case VIRTCHNL_OP_RESET_VF:
+		return "VIRTCHNL_OP_RESET_VF";
+	case VIRTCHNL_OP_GET_VF_RESOURCES:
+		return "VIRTCHNL_OP_GET_VF_RESOURCES";
+	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+		return "VIRTCHNL_OP_CONFIG_TX_QUEUE";
+	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+		return "VIRTCHNL_OP_CONFIG_RX_QUEUE";
+	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		return "VIRTCHNL_OP_CONFIG_VSI_QUEUES";
+	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		return "VIRTCHNL_OP_CONFIG_IRQ_MAP";
+	case VIRTCHNL_OP_ENABLE_QUEUES:
+		return "VIRTCHNL_OP_ENABLE_QUEUES";
+	case VIRTCHNL_OP_DISABLE_QUEUES:
+		return "VIRTCHNL_OP_DISABLE_QUEUES";
+	case VIRTCHNL_OP_ADD_ETH_ADDR:
+		return "VIRTCHNL_OP_ADD_ETH_ADDR";
+	case VIRTCHNL_OP_DEL_ETH_ADDR:
+		return "VIRTCHNL_OP_DEL_ETH_ADDR";
+	case VIRTCHNL_OP_ADD_VLAN:
+		return "VIRTCHNL_OP_ADD_VLAN";
+	case VIRTCHNL_OP_DEL_VLAN:
+		return "VIRTCHNL_OP_DEL_VLAN";
+	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		return "VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE";
+	case VIRTCHNL_OP_GET_STATS:
+		return "VIRTCHNL_OP_GET_STATS";
+	case VIRTCHNL_OP_RSVD:
+		return "VIRTCHNL_OP_RSVD";
+	case VIRTCHNL_OP_EVENT:
+		return "VIRTCHNL_OP_EVENT";
+	case VIRTCHNL_OP_CONFIG_RSS_KEY:
+		return "VIRTCHNL_OP_CONFIG_RSS_KEY";
+	case VIRTCHNL_OP_CONFIG_RSS_LUT:
+		return "VIRTCHNL_OP_CONFIG_RSS_LUT";
+	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+		return "VIRTCHNL_OP_GET_RSS_HENA_CAPS";
+	case VIRTCHNL_OP_SET_RSS_HENA:
+		return "VIRTCHNL_OP_SET_RSS_HENA";
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING";
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING";
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		return "VIRTCHNL_OP_REQUEST_QUEUES";
+	case VIRTCHNL_OP_ENABLE_CHANNELS:
+		return "VIRTCHNL_OP_ENABLE_CHANNELS";
+	case VIRTCHNL_OP_DISABLE_CHANNELS:
+		return "VIRTCHNL_OP_DISABLE_CHANNELS";
+	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
+	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
+	case VIRTCHNL_OP_DCF_CMD_DESC:
+		return "VIRTCHNL_OP_DCF_CMD_DESC";
+	case VIRTCHNL_OP_DCF_CMD_BUFF:
+		return "VIRTCHHNL_OP_DCF_CMD_BUFF";
+	case VIRTCHNL_OP_DCF_DISABLE:
+		return "VIRTCHNL_OP_DCF_DISABLE";
+	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
+		return "VIRTCHNL_OP_DCF_GET_VSI_MAP";
+	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+		return "VIRTCHNL_OP_GET_SUPPORTED_RXDIDS";
+	case VIRTCHNL_OP_ADD_RSS_CFG:
+		return "VIRTCHNL_OP_ADD_RSS_CFG";
+	case VIRTCHNL_OP_DEL_RSS_CFG:
+		return "VIRTCHNL_OP_DEL_RSS_CFG";
+	case VIRTCHNL_OP_ADD_FDIR_FILTER:
+		return "VIRTCHNL_OP_ADD_FDIR_FILTER";
+	case VIRTCHNL_OP_DEL_FDIR_FILTER:
+		return "VIRTCHNL_OP_DEL_FDIR_FILTER";
+	case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
+		return "VIRTCHNL_OP_GET_MAX_RSS_QREGION";
+	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
+		return "VIRTCHNL_OP_ENABLE_QUEUES_V2";
+	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
+		return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
+	case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
+		return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
+	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+		return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS";
+	case VIRTCHNL_OP_ADD_VLAN_V2:
+		return "VIRTCHNL_OP_ADD_VLAN_V2";
+	case VIRTCHNL_OP_DEL_VLAN_V2:
+		return "VIRTCHNL_OP_DEL_VLAN_V2";
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2";
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2";
+	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+		return "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2";
+	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+		return "VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2";
+	case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
+		return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
+	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
+		return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
+	case VIRTCHNL_OP_MAX:
+		return "VIRTCHNL_OP_MAX";
+	default:
+		return "Unsupported (update virtchnl.h)";
+	}
+}
+
+static inline const char *virtchnl_stat_str(enum virtchnl_status_code v_status)
+{
+	switch (v_status) {
+	case VIRTCHNL_STATUS_SUCCESS:
+		return "VIRTCHNL_STATUS_SUCCESS";
+	case VIRTCHNL_STATUS_ERR_PARAM:
+		return "VIRTCHNL_STATUS_ERR_PARAM";
+	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
+		return "VIRTCHNL_STATUS_ERR_NO_MEMORY";
+	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
+		return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
+	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
+		return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
+	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
+		return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
+	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
+		return "VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR";
+	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
+		return "VIRTCHNL_STATUS_ERR_NOT_SUPPORTED";
+	default:
+		return "Unknown status code (update virtchnl.h)";
+	}
+}
+
 /* These macros are used to generate compilation errors if a structure/union
  * is not exactly the correct length. It gives a divide by zero error if the
  * structure/union is not of the correct size, otherwise it creates an enum
@@ -163,8 +325,12 @@ enum virtchnl_ops {
 
 struct virtchnl_msg {
 	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
-	enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
-	enum virtchnl_status_code v_retval;  /* ditto for desc->retval */
+
+	/* avoid confusion with desc->opcode */
+	enum virtchnl_ops v_opcode;
+
+	/* ditto for desc->retval */
+	enum virtchnl_status_code v_retval;
 	u32 vfid;			 /* used by PF when sending to VF */
 };
 
@@ -230,7 +396,9 @@ enum virtchnl_vsi_type {
 struct virtchnl_vsi_resource {
 	u16 vsi_id;
 	u16 num_queue_pairs;
-	enum virtchnl_vsi_type vsi_type;
+
+	/* see enum virtchnl_vsi_type */
+	s32 vsi_type;
 	u16 qset_handle;
 	u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
 };
@@ -241,34 +409,35 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
  * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
  * TX/RX Checksum offloading and TSO for non-tunnelled packets.
  */
-#define VIRTCHNL_VF_OFFLOAD_L2			0x00000001
-#define VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
-#define VIRTCHNL_VF_OFFLOAD_RSVD		0x00000004
-#define VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
-#define VIRTCHNL_VF_OFFLOAD_RSS_REG		0x00000010
-#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		0x00000020
-#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		0x00000040
+#define VIRTCHNL_VF_OFFLOAD_L2			BIT(0)
+#define VIRTCHNL_VF_OFFLOAD_IWARP		BIT(1)
+#define VIRTCHNL_VF_OFFLOAD_RSVD		BIT(2)
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ		BIT(3)
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG		BIT(4)
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		BIT(5)
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
-#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		0x00000080
-	/* 0X00000100 is reserved */
-#define VIRTCHNL_VF_LARGE_NUM_QPAIRS		0x00000200
-#define VIRTCHNL_VF_OFFLOAD_CRC			0x00000400
-#define VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
-#define VIRTCHNL_VF_OFFLOAD_RX_POLLING		0x00020000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	0x00040000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PF		0X00080000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP		0X00100000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		0X00200000
-#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	0X00400000
-#define VIRTCHNL_VF_OFFLOAD_ADQ			0X00800000
-#define VIRTCHNL_VF_OFFLOAD_ADQ_V2		0X01000000
-#define VIRTCHNL_VF_OFFLOAD_USO			0X02000000
-#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC	0X04000000
-#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		0X08000000
-#define VIRTCHNL_VF_OFFLOAD_FDIR_PF		0X10000000
-	/* 0X20000000 is reserved */
-#define VIRTCHNL_VF_CAP_DCF			0X40000000
-	/* 0X80000000 is reserved */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
+#define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
+#define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
+#define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	BIT(18)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF		BIT(19)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP		BIT(20)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		BIT(21)
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	BIT(22)
+#define VIRTCHNL_VF_OFFLOAD_ADQ			BIT(23)
+#define VIRTCHNL_VF_OFFLOAD_ADQ_V2		BIT(24)
+#define VIRTCHNL_VF_OFFLOAD_USO			BIT(25)
+#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC	BIT(26)
+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		BIT(27)
+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)
+#define VIRTCHNL_VF_OFFLOAD_QOS		BIT(29)
+#define VIRTCHNL_VF_CAP_DCF			BIT(30)
+	/* BIT(31) is reserved */
 
 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
 			       VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -307,6 +476,54 @@ struct virtchnl_txq_info {
 
 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
 
+/* RX descriptor IDs (range from 0 to 63) */
+enum virtchnl_rx_desc_ids {
+	VIRTCHNL_RXDID_0_16B_BASE		= 0,
+	/* 32B_BASE and FLEX_SPLITQ share desc ids as default descriptors
+	 * because they can be differentiated based on queue model; e.g. single
+	 * queue model can only use 32B_BASE and split queue model can only use
+	 * FLEX_SPLITQ.  Having these as 1 allows them to be used as default
+	 * descriptors without negotiation.
+	 */
+	VIRTCHNL_RXDID_1_32B_BASE		= 1,
+	VIRTCHNL_RXDID_1_FLEX_SPLITQ		= 1,
+	VIRTCHNL_RXDID_2_FLEX_SQ_NIC		= 2,
+	VIRTCHNL_RXDID_3_FLEX_SQ_SW		= 3,
+	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB	= 4,
+	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL	= 5,
+	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2		= 6,
+	VIRTCHNL_RXDID_7_HW_RSVD		= 7,
+	/* 9 through 15 are reserved */
+	VIRTCHNL_RXDID_16_COMMS_GENERIC		= 16,
+	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN	= 17,
+	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4	= 18,
+	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6	= 19,
+	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW	= 20,
+	VIRTCHNL_RXDID_21_COMMS_AUX_TCP		= 21,
+	/* 22 through 63 are reserved */
+};
+
+/* RX descriptor ID bitmasks */
+enum virtchnl_rx_desc_id_bitmasks {
+	VIRTCHNL_RXDID_0_16B_BASE_M		= BIT(VIRTCHNL_RXDID_0_16B_BASE),
+	VIRTCHNL_RXDID_1_32B_BASE_M		= BIT(VIRTCHNL_RXDID_1_32B_BASE),
+	VIRTCHNL_RXDID_1_FLEX_SPLITQ_M		= BIT(VIRTCHNL_RXDID_1_FLEX_SPLITQ),
+	VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M		= BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC),
+	VIRTCHNL_RXDID_3_FLEX_SQ_SW_M		= BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW),
+	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M	= BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB),
+	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M	= BIT(VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL),
+	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M	= BIT(VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2),
+	VIRTCHNL_RXDID_7_HW_RSVD_M		= BIT(VIRTCHNL_RXDID_7_HW_RSVD),
+	/* 9 through 15 are reserved */
+	VIRTCHNL_RXDID_16_COMMS_GENERIC_M	= BIT(VIRTCHNL_RXDID_16_COMMS_GENERIC),
+	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M	= BIT(VIRTCHNL_RXDID_17_COMMS_AUX_VLAN),
+	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M	= BIT(VIRTCHNL_RXDID_18_COMMS_AUX_IPV4),
+	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M	= BIT(VIRTCHNL_RXDID_19_COMMS_AUX_IPV6),
+	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M	= BIT(VIRTCHNL_RXDID_20_COMMS_AUX_FLOW),
+	VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M	= BIT(VIRTCHNL_RXDID_21_COMMS_AUX_TCP),
+	/* 22 through 63 are reserved */
+};
+
 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
  * VF sends this message to set up parameters for one RX queue.
  * External data buffer contains one instance of virtchnl_rxq_info.
@@ -329,11 +546,17 @@ struct virtchnl_rxq_info {
 	u32 databuffer_size;
 	u32 max_pkt_size;
 	u8 crc_disable;
-	/* only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported */
+	/* see enum virtchnl_rx_desc_ids;
+	 * only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported. Note
+	 * that when the offload is not supported, the descriptor format aligns
+	 * with VIRTCHNL_RXDID_1_32B_BASE.
+	 */
 	u8 rxdid;
 	u8 pad1[2];
 	u64 dma_ring_addr;
-	enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+
+	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
+	s32 rx_split_pos;
 	u32 pad2;
 };
 
@@ -536,6 +759,388 @@ struct virtchnl_vlan_filter_list {
 
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
 
+/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
+ * structures and opcodes.
+ *
+ * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
+ * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
+ * by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
+ * would OR the following bits:
+ *
+ *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * The VF would interpret this as VLAN filtering can be supported on both 0x8100
+ * and 0x88A8 VLAN ethertypes.
+ *
+ * VIRTCHNL_ETHERTYPE_XOR - Used when only a single ethertype can be supported
+ * by the PF concurrently. For example if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
+ * offload it would OR the following bits:
+ *
+ *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * The VF would interpret this as VLAN stripping can be supported on either
+ * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
+ * the previously set value.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
+ * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
+ * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
+ * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
+ *
+ * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
+ * VLAN filtering if the underlying PF supports it.
+ *
+ * VIRTCHNL_VLAN_TOGGLE_ALLOWED - This field is used to say whether a
+ * certain VLAN capability can be toggled. For example if the underlying PF/CP
+ * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
+ * set this bit along with the supported ethertypes.
+ */
+enum virtchnl_vlan_support {
+	VIRTCHNL_VLAN_UNSUPPORTED =		0,
+	VIRTCHNL_VLAN_ETHERTYPE_8100 =		0x00000001,
+	VIRTCHNL_VLAN_ETHERTYPE_88A8 =		0x00000002,
+	VIRTCHNL_VLAN_ETHERTYPE_9100 =		0x00000004,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 =	0x00000100,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 =	0x00000200,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 =	0x00000400,
+	VIRTCHNL_VLAN_PRIO =			0x01000000,
+	VIRTCHNL_VLAN_FILTER_MASK =		0x10000000,
+	VIRTCHNL_VLAN_ETHERTYPE_AND =		0x20000000,
+	VIRTCHNL_VLAN_ETHERTYPE_XOR =		0x40000000,
+	VIRTCHNL_VLAN_TOGGLE =			0x80000000
+};
+
+/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * for filtering, insertion, and stripping capabilities.
+ *
+ * If only outer capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective.
+ *
+ * If only inner capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective. Functionally this is the same as if only outer capabilities are
+ * supported. The VF driver is just forced to use the inner fields when
+ * adding/deleting filters and enabling/disabling offloads (if supported).
+ *
+ * If both outer and inner capabilities are supported (for filtering, insertion,
+ * and/or stripping) then outer refers to the outer most or single VLAN and
+ * inner refers to the second VLAN, if it exists, in the packet.
+ *
+ * There is no support for tunneled VLAN offloads, so outer or inner are never
+ * referring to a tunneled packet from the VF's perspective.
+ */
+struct virtchnl_vlan_supported_caps {
+	u32 outer;
+	u32 inner;
+};
+
+/* The PF populates these fields based on the supported VLAN filtering. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
+ * the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN filtering setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The ethertype(s) specified in the ethertype_init field are the ethertypes
+ * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
+ * most VLAN from the VF's perspective. If both inner and outer filtering are
+ * allowed then ethertype_init only refers to the outer most VLAN as only
+ * VLAN ethertype supported for inner VLAN filtering is
+ * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
+ * when both inner and outer filtering are allowed.
+ *
+ * The max_filters field tells the VF how many VLAN filters it's allowed to have
+ * at any one time. If it exceeds this amount and tries to add another filter,
+ * then the request will be rejected by the PF. To prevent failures, the VF
+ * should keep track of how many VLAN filters it has added and not attempt to
+ * add more than max_filters.
+ */
+struct virtchnl_vlan_filtering_caps {
+	struct virtchnl_vlan_supported_caps filtering_support;
+	u32 ethertype_init;
+	u16 max_filters;
+	u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
+
+/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
+ * if the PF supports a different ethertype for stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
+ * for stripping affect the ethertype(s) specified for insertion and vice versa
+ * as well. If the VF tries to configure VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
+ * that will be the ethertype for both stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
+ * stripping do not affect the ethertype(s) specified for insertion and vice
+ * versa.
+ */
+enum virtchnl_vlan_ethertype_match {
+	VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
+	VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
+};
+
+/* The PF populates these fields based on the supported VLAN offloads. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN offload setting if the
+ * VIRTCHNL_VLAN_TOGGLE_ALLOWED bit is set.
+ *
+ * The VF driver needs to be aware of how the tags are stripped by hardware and
+ * inserted by the VF driver based on the level of offload support. The PF will
+ * populate these fields based on where the VLAN tags are expected to be
+ * offloaded via the VIRTHCNL_VLAN_TAG_LOCATION_* bits. The VF will need to
+ * interpret these fields. See the definition of the
+ * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
+ * enumeration.
+ */
+struct virtchnl_vlan_offload_caps {
+	struct virtchnl_vlan_supported_caps stripping_support;
+	struct virtchnl_vlan_supported_caps insertion_support;
+	u32 ethertype_init;
+	u8 ethertype_match;
+	u8 pad[3];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
+
+/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * VF sends this message to determine its VLAN capabilities.
+ *
+ * PF will mark which capabilities it supports based on hardware support and
+ * current configuration. For example, if a port VLAN is configured the PF will
+ * not allow outer VLAN filtering, stripping, or insertion to be configured so
+ * it will block these features from the VF.
+ *
+ * The VF will need to cross reference its capabilities with the PFs
+ * capabilities in the response message from the PF to determine the VLAN
+ * support.
+ */
+struct virtchnl_vlan_caps {
+	struct virtchnl_vlan_filtering_caps filtering;
+	struct virtchnl_vlan_offload_caps offloads;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
+
+struct virtchnl_vlan {
+	u16 tci;	/* tci[15:13] = PCP and tci[11:0] = VID */
+	u16 tci_mask;	/* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
+			 * filtering caps
+			 */
+	u16 tpid;	/* 0x8100, 0x88a8, etc. and only type(s) set in
+			 * filtering caps. Note that tpid here does not refer to
+			 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
+			 * actual 2-byte VLAN TPID
+			 */
+	u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
+
+struct virtchnl_vlan_filter {
+	struct virtchnl_vlan inner;
+	struct virtchnl_vlan outer;
+	u8 pad[16];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
+
+/* VIRTCHNL_OP_ADD_VLAN_V2
+ * VIRTCHNL_OP_DEL_VLAN_V2
+ *
+ * VF sends these messages to add/del one or more VLAN tag filters for Rx
+ * traffic.
+ *
+ * The PF attempts to add the filters and returns status.
+ *
+ * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
+ * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
+ */
+struct virtchnl_vlan_filter_list_v2 {
+	u16 vport_id;
+	u16 num_elements;
+	u8 pad[4];
+	struct virtchnl_vlan_filter filters[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);
+
+/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ *
+ * VF sends this message to enable or disable VLAN stripping or insertion. It
+ * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
+ * allowed and whether or not it's allowed to enable/disable the specific
+ * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.offloads fields to determine which offload
+ * messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable and/or disable 0x8100 inner
+ * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
+ * case means the outer most or single VLAN from the VF's perspective. This is
+ * because no outer offloads are supported. See the comments above the
+ * virtchnl_vlan_supported_caps structure for more details.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.inner =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.inner =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * In order to enable inner (again note that in this case inner is the outer
+ * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
+ * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
+ * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.inner_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * The reason that VLAN TPID(s) are not being used for the
+ * outer_ethertype_setting and inner_ethertype_setting fields is because it's
+ * possible a device could support VLAN insertion and/or stripping offload on
+ * multiple ethertypes concurrently, so this method allows a VF to request
+ * multiple ethertypes in one message using the virtchnl_vlan_support
+ * enumeration.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
+ * VLAN insertion and stripping simultaneously. The
+ * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
+ * populated based on what the PF can support.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
+ * would populate the virtchnl_vlan_setting structure in the following manner
+ * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * There is also the case where a PF and the underlying hardware can support
+ * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
+ * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
+ * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
+ * offloads. The ethertypes must match for stripping and insertion.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.ethertype_match =
+ *			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ *
+ * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
+ * populate the virtchnl_vlan_setting structure in the following manner and send
+ * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
+ * ethertype for VLAN insertion if it's enabled. So, for completeness, a
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2
+ *
+ * VF sends this message to enable or disable VLAN filtering. It also needs to
+ * specify an ethertype. The VF knows which VLAN ethertypes are allowed and
+ * whether or not it's allowed to enable/disable filtering via the
+ * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.filtering fields to determine which, if any,
+ * filtering messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.filtering in the
+ * following manner the VF will be allowed to enable/disable 0x8100 and 0x88a8
+ * outer VLAN filtering together. Note, that the VIRTCHNL_VLAN_ETHERTYPE_AND
+ * means that all filtering ethertypes will be enabled and disabled together
+ * regardless of the request from the VF. This means that the underlying
+ * hardware only supports VLAN filtering for all of the specified VLAN ethertypes
+ * or none of them.
+ *
+ * virtchnl_vlan_caps.filtering.filtering_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN filtering for 0x88a8 and 0x8100 VLANs (0x9100
+ * VLANs aren't supported by the VF driver), the VF would populate the
+ * virtchnl_vlan_setting structure in the following manner and send the
+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2. The same message format would be used
+ * to disable outer VLAN filtering for 0x88a8 and 0x8100 VLANs, but the
+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 opcode is used.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ */
+struct virtchnl_vlan_setting {
+	u32 outer_ethertype_setting;
+	u32 inner_ethertype_setting;
+	u16 vport_id;
+	u8 pad[6];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
+
 /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
  * VF sends VSI id and flags.
  * PF returns status code in retval.
@@ -706,14 +1311,26 @@ enum virtchnl_flow_type {
 struct virtchnl_filter {
 	union	virtchnl_flow_spec data;
 	union	virtchnl_flow_spec mask;
-	enum	virtchnl_flow_type flow_type;
-	enum	virtchnl_action action;
+
+	/* see enum virtchnl_flow_type */
+	s32	flow_type;
+
+	/* see enum virtchnl_action */
+	s32	action;
 	u32	action_meta;
 	u8	field_flags;
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
 
+struct virtchnl_shaper_bw {
+	/* Unit is Kbps */
+	u32 committed;
+	u32 peak;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
+
 /* VIRTCHNL_OP_DCF_GET_VSI_MAP
  * VF sends this message to get VSI mapping table.
  * PF responds with an indirect message containing VF's
@@ -754,7 +1371,71 @@ struct virtchnl_pkg_info {
 
 VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_pkg_info);
 
+/* VIRTCHNL_OP_DCF_VLAN_OFFLOAD
+ * DCF negotiates the VIRTCHNL_VF_OFFLOAD_VLAN_V2 capability firstly to get
+ * the double VLAN configuration, then DCF sends this message to configure the
+ * outer or inner VLAN offloads (insertion and strip) for the target VF.
+ */
+struct virtchnl_dcf_vlan_offload {
+	u16 vf_id;
+	u16 tpid;
+	u16 vlan_flags;
+#define VIRTCHNL_DCF_VLAN_TYPE_S		0
+#define VIRTCHNL_DCF_VLAN_TYPE_M		\
+			(0x1 << VIRTCHNL_DCF_VLAN_TYPE_S)
+#define VIRTCHNL_DCF_VLAN_TYPE_INNER		0x0
+#define VIRTCHNL_DCF_VLAN_TYPE_OUTER		0x1
+#define VIRTCHNL_DCF_VLAN_INSERT_MODE_S		1
+#define VIRTCHNL_DCF_VLAN_INSERT_MODE_M	\
+			(0x7 << VIRTCHNL_DCF_VLAN_INSERT_MODE_S)
+#define VIRTCHNL_DCF_VLAN_INSERT_DISABLE	0x1
+#define VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED	0x2
+#define VIRTCHNL_DCF_VLAN_INSERT_VIA_TX_DESC	0x3
+#define VIRTCHNL_DCF_VLAN_STRIP_MODE_S		4
+#define VIRTCHNL_DCF_VLAN_STRIP_MODE_M		\
+			(0x7 << VIRTCHNL_DCF_VLAN_STRIP_MODE_S)
+#define VIRTCHNL_DCF_VLAN_STRIP_DISABLE		0x1
+#define VIRTCHNL_DCF_VLAN_STRIP_ONLY		0x2
+#define VIRTCHNL_DCF_VLAN_STRIP_INTO_RX_DESC	0x3
+	u16 vlan_id;
+	u16 pad[4];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_dcf_vlan_offload);
+
+struct virtchnl_dcf_bw_cfg {
+	u8 tc_num;
+#define VIRTCHNL_DCF_BW_CIR		BIT(0)
+#define VIRTCHNL_DCF_BW_PIR		BIT(1)
+	u8 bw_type;
+	u8 pad[2];
+	enum virtchnl_bw_limit_type type;
+	union {
+		struct virtchnl_shaper_bw shaper;
+		u8 pad2[32];
+	};
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_dcf_bw_cfg);
+
+/* VIRTCHNL_OP_DCF_CONFIG_BW
+ * VF send this message to set the bandwidth configuration of each
+ * TC with a specific vf id. The flag node_type is to indicate that
+ * this message is to configure VSI node or TC node bandwidth.
+ */
+struct virtchnl_dcf_bw_cfg_list {
+	u16 vf_id;
+	u8 num_elem;
+#define VIRTCHNL_DCF_TARGET_TC_BW	0
+#define VIRTCHNL_DCF_TARGET_VF_BW	1
+	u8 node_type;
+	struct virtchnl_dcf_bw_cfg cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_dcf_bw_cfg_list);
+
 struct virtchnl_supported_rxdids {
+	/* see enum virtchnl_rx_desc_id_bitmasks */
 	u64 supported_rxdids;
 };
 
@@ -779,7 +1460,8 @@ enum virtchnl_event_codes {
 #define PF_EVENT_SEVERITY_CERTAIN_DOOM	255
 
 struct virtchnl_pf_event {
-	enum virtchnl_event_codes event;
+	/* see enum virtchnl_event_codes */
+	s32 event;
 	union {
 		/* If the PF driver does not support the new speed reporting
 		 * capabilities then use link_event else use link_event_adv to
@@ -890,6 +1572,16 @@ enum virtchnl_proto_hdr_type {
 	VIRTCHNL_PROTO_HDR_AH,
 	VIRTCHNL_PROTO_HDR_PFCP,
 	VIRTCHNL_PROTO_HDR_GTPC,
+	VIRTCHNL_PROTO_HDR_ECPRI,
+	VIRTCHNL_PROTO_HDR_L2TPV2,
+	VIRTCHNL_PROTO_HDR_PPP,
+	/* IPv4 and IPv6 Fragment header types are only associated to
+	 * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively,
+	 * cannot be used independently.
+	 */
+	VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
+	VIRTCHNL_PROTO_HDR_GRE,
 };
 
 /* Protocol header field within a protocol header. */
@@ -912,6 +1604,7 @@ enum virtchnl_proto_hdr_field {
 	VIRTCHNL_PROTO_HDR_IPV4_DSCP,
 	VIRTCHNL_PROTO_HDR_IPV4_TTL,
 	VIRTCHNL_PROTO_HDR_IPV4_PROT,
+	VIRTCHNL_PROTO_HDR_IPV4_CHKSUM,
 	/* IPV6 */
 	VIRTCHNL_PROTO_HDR_IPV6_SRC =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
@@ -936,14 +1629,17 @@ enum virtchnl_proto_hdr_field {
 	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
 	VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+	VIRTCHNL_PROTO_HDR_TCP_CHKSUM,
 	/* UDP */
 	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
 	VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+	VIRTCHNL_PROTO_HDR_UDP_CHKSUM,
 	/* SCTP */
 	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
 	VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+	VIRTCHNL_PROTO_HDR_SCTP_CHKSUM,
 	/* GTPU_IP */
 	VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
@@ -970,10 +1666,30 @@ enum virtchnl_proto_hdr_field {
 	/* GTPC */
 	VIRTCHNL_PROTO_HDR_GTPC_TEID =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
+	/* ECPRI */
+	VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),
+	VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,
+	/* IPv4 Dummy Fragment */
+	VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
+	/* IPv6 Extension Fragment */
+	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
+	/* GTPU_DWN/UP */
+	VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
+	VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
+	/* L2TPv2 */
+	VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),
+	VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,
 };
 
 struct virtchnl_proto_hdr {
-	enum virtchnl_proto_hdr_type type;
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
 	u32 field_selector; /* a bit mask to select field for header type */
 	u8 buffer[64];
 	/**
@@ -1002,7 +1718,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
 
 struct virtchnl_rss_cfg {
 	struct virtchnl_proto_hdrs proto_hdrs;	   /* protocol headers */
-	enum virtchnl_rss_algorithm rss_algorithm; /* rss algorithm type */
+
+	/* see enum virtchnl_rss_algorithm; rss algorithm type */
+	s32 rss_algorithm;
 	u8 reserved[128];                          /* reserve for future */
 };
 
@@ -1010,7 +1728,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
 
 /* action configuration for FDIR */
 struct virtchnl_filter_action {
-	enum virtchnl_action type;
+	/* see enum virtchnl_action type */
+	s32 type;
 	union {
 		/* used for queue and qgroup action */
 		struct {
@@ -1049,20 +1768,6 @@ struct virtchnl_fdir_rule {
 
 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
 
-/* query information to retrieve fdir rule counters.
- * PF will fill out this structure to reset counter.
- */
-struct virtchnl_fdir_query_info {
-	u32 match_packets_valid:1;
-	u32 match_bytes_valid:1;
-	u32 reserved:30;  /* Reserved, must be zero. */
-	u32 pad;
-	u64 matched_packets; /* Number of packets for this rule. */
-	u64 matched_bytes;   /* Number of bytes through this rule. */
-};
-
-VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_fdir_query_info);
-
 /* Status returned to VF after VF requests FDIR commands
  * VIRTCHNL_FDIR_SUCCESS
  * VF FDIR related request is successfully done by PF
@@ -1117,7 +1822,9 @@ struct virtchnl_fdir_add {
 	u16 validate_only; /* INPUT */
 	u32 flow_id;       /* OUTPUT */
 	struct virtchnl_fdir_rule rule_cfg; /* INPUT */
-	enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
+	s32 status;
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
@@ -1130,27 +1837,69 @@ struct virtchnl_fdir_del {
 	u16 vsi_id;  /* INPUT */
 	u16 pad;
 	u32 flow_id; /* INPUT */
-	enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
+	s32 status;
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
 
-/* VIRTCHNL_OP_QUERY_FDIR_FILTER
- * VF sends this request to PF by filling out vsi_id,
- * flow_id and reset_counter. PF will return query_info
- * and query_status to VF.
+/* VIRTCHNL_OP_GET_QOS_CAPS
+ * VF sends this message to get its QoS Caps, such as
+ * TC number, Arbiter and Bandwidth.
  */
-struct virtchnl_fdir_query {
-	u16 vsi_id;   /* INPUT */
-	u16 pad1[3];
-	u32 flow_id;  /* INPUT */
-	u32 reset_counter:1; /* INPUT */
-	struct virtchnl_fdir_query_info query_info; /* OUTPUT */
-	enum virtchnl_fdir_prgm_status status;  /* OUTPUT */
-	u32 pad2;
+struct virtchnl_qos_cap_elem {
+	u8 tc_num;
+	u8 tc_prio;
+#define VIRTCHNL_ABITER_STRICT      0
+#define VIRTCHNL_ABITER_ETS         2
+	u8 arbiter;
+#define VIRTCHNL_STRICT_WEIGHT      1
+	u8 weight;
+	enum virtchnl_bw_limit_type type;
+	union {
+		struct virtchnl_shaper_bw shaper;
+		u8 pad2[32];
+	};
 };
 
-VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_fdir_query);
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
+
+struct virtchnl_qos_cap_list {
+	u16 vsi_id;
+	u16 num_elem;
+	struct virtchnl_qos_cap_elem cap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_qos_cap_list);
+
+/* VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP
+ * VF sends message virtchnl_queue_tc_mapping to set queue to tc
+ * mapping for all the Tx and Rx queues with a specified VSI, and
+ * would get response about bitmap of valid user priorities
+ * associated with queues.
+ */
+struct virtchnl_queue_tc_mapping {
+	u16 vsi_id;
+	u16 num_tc;
+	u16 num_queue_pairs;
+	u8 pad[2];
+	union {
+		struct {
+			u16 start_queue_id;
+			u16 queue_count;
+		} req;
+		struct {
+#define VIRTCHNL_USER_PRIO_TYPE_UP	0
+#define VIRTCHNL_USER_PRIO_TYPE_DSCP	1
+			u16 prio_type;
+			u16 valid_prio_bitmap;
+		} resp;
+	} tc[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
+
 
 /* TX and RX queue types are valid in legacy as well as split queue models.
  * With Split Queue model, 2 additional types are introduced - TX_COMPLETION
@@ -1169,7 +1918,8 @@ enum virtchnl_queue_type {
 
 /* structure to specify a chunk of contiguous queues */
 struct virtchnl_queue_chunk {
-	enum virtchnl_queue_type type;
+	/* see enum virtchnl_queue_type */
+	s32 type;
 	u16 start_queue_id;
 	u16 num_queues;
 };
@@ -1222,8 +1972,12 @@ struct virtchnl_queue_vector {
 	u16 queue_id;
 	u16 vector_id;
 	u8 pad[4];
-	enum virtchnl_itr_idx itr_idx;
-	enum virtchnl_queue_type queue_type;
+
+	/* see enum virtchnl_itr_idx */
+	s32 itr_idx;
+
+	/* see enum virtchnl_queue_type */
+	s32 queue_type;
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);
@@ -1286,6 +2040,10 @@ enum virtchnl_vector_limits {
 	VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX	=
 		((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) /
 		sizeof(struct virtchnl_queue_vector),
+
+	VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list_v2)) /
+		sizeof(struct virtchnl_vlan_filter),
 };
 
 /**
@@ -1460,6 +2218,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		valid_len = sizeof(struct virtchnl_filter);
 		break;
+	case VIRTCHNL_OP_DCF_VLAN_OFFLOAD:
+		valid_len = sizeof(struct virtchnl_dcf_vlan_offload);
+		break;
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
 		/* These two opcodes are specific to handle the AdminQ command,
@@ -1471,6 +2232,19 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
 	case VIRTCHNL_OP_DCF_GET_PKG_INFO:
 		break;
+	case VIRTCHNL_OP_DCF_CONFIG_BW:
+		valid_len = sizeof(struct virtchnl_dcf_bw_cfg_list);
+		if (msglen >= valid_len) {
+			struct virtchnl_dcf_bw_cfg_list *cfg_list =
+				(struct virtchnl_dcf_bw_cfg_list *)msg;
+			if (cfg_list->num_elem == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (cfg_list->num_elem - 1) *
+					 sizeof(struct virtchnl_dcf_bw_cfg);
+		}
+		break;
 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
 		break;
 	case VIRTCHNL_OP_ADD_RSS_CFG:
@@ -1483,8 +2257,47 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
 		valid_len = sizeof(struct virtchnl_fdir_del);
 		break;
-	case VIRTCHNL_OP_QUERY_FDIR_FILTER:
-		valid_len = sizeof(struct virtchnl_fdir_query);
+	case VIRTCHNL_OP_GET_QOS_CAPS:
+		break;
+	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
+		valid_len = sizeof(struct virtchnl_queue_tc_mapping);
+		if (msglen >= valid_len) {
+			struct virtchnl_queue_tc_mapping *q_tc =
+				(struct virtchnl_queue_tc_mapping *)msg;
+			if (q_tc->num_tc == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_tc->num_tc - 1) *
+					 sizeof(q_tc->tc[0]);
+		}
+		break;
+	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+		break;
+	case VIRTCHNL_OP_ADD_VLAN_V2:
+	case VIRTCHNL_OP_DEL_VLAN_V2:
+		valid_len = sizeof(struct virtchnl_vlan_filter_list_v2);
+		if (msglen >= valid_len) {
+			struct virtchnl_vlan_filter_list_v2 *vfl =
+			    (struct virtchnl_vlan_filter_list_v2 *)msg;
+
+			if (vfl->num_elements == 0 || vfl->num_elements >
+			    VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += (vfl->num_elements - 1) *
+				sizeof(struct virtchnl_vlan_filter);
+		}
+		break;
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+	case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
+	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
+		valid_len = sizeof(struct virtchnl_vlan_setting);
 		break;
 	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
 	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
@@ -1515,6 +2328,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..2f4bf15725
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all fields are valid; if a certain field is invalid, all its bits are set to 1 */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF creates the SA as configured and the PF driver will return
+ * a unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF sends this SA configuration to PF using virtchnl;
+ * PF reads the SA and will return the configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+
+	/* 0 for NAT-T unsupported, 1 for NAT-T supported */
+	u8 is_udp;
+
+	/* reserved */
+	u8 reserved;
+
+	/* NAT-T UDP port number. Only valid in case NAT-T supported */
+	u16 udp_port;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
diff --git a/drivers/net/i40e/base/README b/drivers/net/i40e/base/README
index 6af2993116..b1da53db2b 100644
--- a/drivers/net/i40e/base/README
+++ b/drivers/net/i40e/base/README
@@ -6,7 +6,7 @@ Intel® I40E driver
 ==================
 
 This directory contains source code of FreeBSD i40e driver of version
-cid-i40e.2020.08.27.tar.gz released by the team which develops
+cid-i40e.2022.03.08.tar.gz released by the team which develops
 basic drivers for any i40e NIC. The directory of base/ contains the
 original source package.
 This driver is valid for the product(s) listed below
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index c63a38e900..27c82d9b44 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -840,7 +840,7 @@ STATIC bool i40e_asq_done(struct i40e_hw *hw)
 }
 
 /**
- *  i40e_asq_send_command - send command to Admin Queue
+ *  i40e_asq_send_command_exec - send command to Admin Queue
  *  @hw: pointer to the hw struct
  *  @desc: prefilled descriptor describing the command (non DMA mem)
  *  @buff: buffer to use for indirect commands
@@ -850,11 +850,12 @@ STATIC bool i40e_asq_done(struct i40e_hw *hw)
  *  This is the main send command driver routine for the Admin Queue send
  *  queue.  It runs the queue, cleans the queue, etc
  **/
-enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
-				struct i40e_aq_desc *desc,
-				void *buff, /* can be NULL */
-				u16  buff_size,
-				struct i40e_asq_cmd_details *cmd_details)
+STATIC enum i40e_status_code
+i40e_asq_send_command_exec(struct i40e_hw *hw,
+			   struct i40e_aq_desc *desc,
+			   void *buff, /* can be NULL */
+			   u16  buff_size,
+			   struct i40e_asq_cmd_details *cmd_details)
 {
 	enum i40e_status_code status = I40E_SUCCESS;
 	struct i40e_dma_mem *dma_buff = NULL;
@@ -864,8 +865,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
 	u16  retval = 0;
 	u32  val = 0;
 
-	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
-
 	hw->aq.asq_last_status = I40E_AQ_RC_OK;
 
 	if (hw->aq.asq.count == 0) {
@@ -1048,6 +1047,64 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
 	}
 
 asq_send_command_error:
+	return status;
+}
+
+/**
+ *  i40e_asq_send_command - send command to Admin Queue
+ *  @hw: pointer to the hw struct
+ *  @desc: prefilled descriptor describing the command (non DMA mem)
+ *  @buff: buffer to use for indirect commands
+ *  @buff_size: size of buffer for indirect commands
+ *  @cmd_details: pointer to command details structure
+ *
+ *  Acquires the lock and calls the main send command execution
+ *  routine.
+ **/
+enum i40e_status_code
+i40e_asq_send_command(struct i40e_hw *hw,
+		      struct i40e_aq_desc *desc,
+		      void *buff, /* can be NULL */
+		      u16  buff_size,
+		      struct i40e_asq_cmd_details *cmd_details)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+	status = i40e_asq_send_command_exec(hw, desc, buff, buff_size,
+					    cmd_details);
+	i40e_release_spinlock(&hw->aq.asq_spinlock);
+	return status;
+}
+
+/**
+ *  i40e_asq_send_command_v2 - send command to Admin Queue
+ *  @hw: pointer to the hw struct
+ *  @desc: prefilled descriptor describing the command (non DMA mem)
+ *  @buff: buffer to use for indirect commands
+ *  @buff_size: size of buffer for indirect commands
+ *  @cmd_details: pointer to command details structure
+ *  @aq_status: pointer to Admin Queue status return value
+ *
+ *  Acquires the lock and calls the main send command execution
+ *  routine. Returns the last Admin Queue status in aq_status
+ *  to avoid race conditions in access to hw->aq.asq_last_status.
+ **/
+enum i40e_status_code
+i40e_asq_send_command_v2(struct i40e_hw *hw,
+			 struct i40e_aq_desc *desc,
+			 void *buff, /* can be NULL */
+			 u16  buff_size,
+			 struct i40e_asq_cmd_details *cmd_details,
+			 enum i40e_admin_queue_err *aq_status)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+	status = i40e_asq_send_command_exec(hw, desc, buff, buff_size,
+					    cmd_details);
+	if (aq_status)
+		*aq_status = hw->aq.asq_last_status;
 	i40e_release_spinlock(&hw->aq.asq_spinlock);
 	return status;
 }
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index 4d80568050..def307b59d 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -12,8 +12,8 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR_X722	0x000B
-#define I40E_FW_API_VERSION_MINOR_X710	0x000C
+#define I40E_FW_API_VERSION_MINOR_X722	0x000C
+#define I40E_FW_API_VERSION_MINOR_X710	0x000F
 
 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
 					I40E_FW_API_VERSION_MINOR_X710 : \
@@ -768,6 +768,7 @@ struct i40e_aqc_set_switch_config {
 #define I40E_AQ_SET_SWITCH_CFG_PROMISC		0x0001
 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER	0x0002
 #define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT	0x0004
+#define I40E_AQ_SET_SWITCH_CFG_OUTER_VLAN	0x0008
 	__le16	valid_flags;
 	/* The ethertype in switch_tag is dropped on ingress and used
 	 * internally by the switch. Set this to zero for the default
@@ -904,7 +905,7 @@ struct i40e_aqc_vsi_properties_data {
 	u8	sec_reserved;
 	/* VLAN section */
 	__le16	pvid; /* VLANS include priority bits */
-	__le16	fcoe_pvid;
+	__le16	outer_vlan;
 	u8	port_vlan_flags;
 #define I40E_AQ_VSI_PVLAN_MODE_SHIFT	0x00
 #define I40E_AQ_VSI_PVLAN_MODE_MASK	(0x03 << \
@@ -920,7 +921,24 @@ struct i40e_aqc_vsi_properties_data {
 #define I40E_AQ_VSI_PVLAN_EMOD_STR_UP	0x08
 #define I40E_AQ_VSI_PVLAN_EMOD_STR	0x10
 #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING	0x18
-	u8	pvlan_reserved[3];
+	u8	outer_vlan_flags;
+#define I40E_AQ_VSI_OVLAN_MODE_SHIFT	0x00
+#define I40E_AQ_VSI_OVLAN_MODE_MASK	(0x03 << \
+					 I40E_AQ_VSI_OVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_OVLAN_MODE_UNTAGGED	0x01
+#define I40E_AQ_VSI_OVLAN_MODE_TAGGED	0x02
+#define I40E_AQ_VSI_OVLAN_MODE_ALL	0x03
+#define I40E_AQ_VSI_OVLAN_INSERT_PVID	0x04
+#define I40E_AQ_VSI_OVLAN_EMOD_SHIFT	0x03
+#define I40E_AQ_VSI_OVLAN_EMOD_MASK	(0x03 <<\
+					 I40E_AQ_VSI_OVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_OVLAN_EMOD_SHOW_ALL	0x00
+#define I40E_AQ_VSI_OVLAN_EMOD_SHOW_UP	0x01
+#define I40E_AQ_VSI_OVLAN_EMOD_HIDE_ALL	0x02
+#define I40E_AQ_VSI_OVLAN_EMOD_NOTHING	0x03
+#define I40E_AQ_VSI_OVLAN_CTRL_ENA	0x04
+
+	u8	pvlan_reserved[2];
 	/* ingress egress up sections */
 	__le32	ingress_table; /* bitmap, 3 bits per up */
 #define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT	0
@@ -2017,6 +2035,15 @@ enum i40e_aq_link_speed {
 	I40E_LINK_SPEED_25GB	= (1 << I40E_LINK_SPEED_25GB_SHIFT),
 };
 
+enum i40e_prt_mac_pcs_link_speed {
+	I40E_PRT_MAC_PCS_LINK_SPEED_UNKNOWN = 0,
+	I40E_PRT_MAC_PCS_LINK_SPEED_100MB,
+	I40E_PRT_MAC_PCS_LINK_SPEED_1GB,
+	I40E_PRT_MAC_PCS_LINK_SPEED_10GB,
+	I40E_PRT_MAC_PCS_LINK_SPEED_40GB,
+	I40E_PRT_MAC_PCS_LINK_SPEED_20GB
+};
+
 struct i40e_aqc_module_desc {
 	u8 oui[3];
 	u8 reserved1;
@@ -2427,11 +2454,15 @@ struct i40e_aqc_rollback_revision_update {
 	u8	optin_mode; /* bool */
 #define I40E_AQ_RREV_OPTION_MODE			0x01
 	u8	module_selected;
-#define I40E_AQ_RREV_MODULE_PCIE_ANALOG		0
-#define I40E_AQ_RREV_MODULE_PHY_ANALOG		1
-#define I40E_AQ_RREV_MODULE_OPTION_ROM		2
-#define I40E_AQ_RREV_MODULE_EMP_IMAGE		3
-#define I40E_AQ_RREV_MODULE_PE_IMAGE		4
+#define I40E_AQ_RREV_MODULE_PCIE_ANALOG			0
+#define I40E_AQ_RREV_MODULE_PHY_ANALOG			1
+#define I40E_AQ_RREV_MODULE_OPTION_ROM			2
+#define I40E_AQ_RREV_MODULE_EMP_IMAGE			3
+#define I40E_AQ_RREV_MODULE_PE_IMAGE			4
+#define I40E_AQ_RREV_MODULE_PHY_PLL_O_CONFIGURATION	5
+#define I40E_AQ_RREV_MODULE_PHY_0_CONFIGURATION		6
+#define I40E_AQ_RREV_MODULE_PHY_PLL_1_CONFIGURATION	7
+#define I40E_AQ_RREV_MODULE_PHY_1_CONFIGURATION		8
 	u8	reserved1[2];
 	u32	min_rrev;
 	u8	reserved2[8];
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index f11d25d0d8..122a3782a4 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -37,6 +37,7 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
 		case I40E_DEV_ID_10G_B:
 		case I40E_DEV_ID_10G_SFP:
 		case I40E_DEV_ID_5G_BASE_T_BC:
+		case I40E_DEV_ID_1G_BASE_T_BC:
 		case I40E_DEV_ID_20G_KR2:
 		case I40E_DEV_ID_20G_KR2_A:
 		case I40E_DEV_ID_25G_B:
@@ -54,6 +55,7 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
 		case I40E_DEV_ID_1G_BASE_T_X722:
 		case I40E_DEV_ID_10G_BASE_T_X722:
 		case I40E_DEV_ID_SFP_I_X722:
+		case I40E_DEV_ID_SFP_X722_A:
 			hw->mac.type = I40E_MAC_X722;
 			break;
 #if defined(INTEGRATED_VF) || defined(VF_DRIVER)
@@ -176,8 +178,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
 		return "I40E_ERR_INVALID_MAC_ADDR";
 	case I40E_ERR_DEVICE_NOT_SUPPORTED:
 		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
-	case I40E_ERR_MASTER_REQUESTS_PENDING:
-		return "I40E_ERR_MASTER_REQUESTS_PENDING";
+	case I40E_ERR_PRIMARY_REQUESTS_PENDING:
+		return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
 	case I40E_ERR_INVALID_LINK_SETTINGS:
 		return "I40E_ERR_INVALID_LINK_SETTINGS";
 	case I40E_ERR_AUTONEG_NOT_COMPLETE:
@@ -1624,6 +1626,35 @@ u32 i40e_led_get(struct i40e_hw *hw)
 	return mode;
 }
 
+/**
+ * i40e_led_get_blink - return current LED blink setting
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the LED_BLINK bit as defined in the
+ * GPIO register definitions (0 = no blink, 1 = do blink).
+ **/
+bool i40e_led_get_blink(struct i40e_hw *hw)
+{
+	bool blink = 0;
+	int i;
+
+	/* as per the documentation GPIO 22-29 are the LED
+	 * GPIO pins named LED0..LED7
+	 */
+	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+		u32 gpio_val = i40e_led_is_mine(hw, i);
+
+		if (!gpio_val)
+			continue;
+
+		blink = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK) >>
+			I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+		break;
+	}
+
+	return blink;
+}
+
 /**
  * i40e_led_set - set new on/off mode
  * @hw: pointer to the hw struct
@@ -3120,6 +3151,46 @@ enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
 	return status;
 }
 
+/**
+ * i40e_prepare_add_macvlan
+ * @mv_list: list of macvlans to be added
+ * @desc: pointer to AQ descriptor structure
+ * @count: length of the list
+ * @seid: VSI for the mac address
+ *
+ * Internal helper function that prepares the add macvlan request
+ * and returns the buffer size.
+ **/
+static u16
+i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
+			 struct i40e_aq_desc *desc, u16 count, u16 seid)
+{
+	struct i40e_aqc_macvlan *cmd =
+		(struct i40e_aqc_macvlan *)&desc->params.raw;
+	u16 buf_size;
+	int i;
+
+	buf_size = count * sizeof(*mv_list);
+
+	/* prep the rest of the request */
+	i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
+	cmd->num_addresses = CPU_TO_LE16(count);
+	cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+	cmd->seid[1] = 0;
+	cmd->seid[2] = 0;
+
+	for (i = 0; i < count; i++)
+		if (I40E_IS_MULTICAST(mv_list[i].mac_addr))
+			mv_list[i].flags |=
+			    CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
+
+	desc->flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc->flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	return buf_size;
+}
+
 /**
  * i40e_aq_add_macvlan
  * @hw: pointer to the hw struct
@@ -3130,8 +3201,74 @@ enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
  *
  * Add MAC/VLAN addresses to the HW filtering
  **/
-enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
-			struct i40e_aqc_add_macvlan_element_data *mv_list,
+enum i40e_status_code
+i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+		    struct i40e_aqc_add_macvlan_element_data *mv_list,
+		    u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	enum i40e_status_code status;
+	u16 buf_size;
+
+	if (count == 0 || !mv_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_add_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Add MAC/VLAN addresses to the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+enum i40e_status_code
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+		       struct i40e_aqc_add_macvlan_element_data *mv_list,
+		       u16 count, struct i40e_asq_cmd_details *cmd_details,
+		       enum i40e_admin_queue_err *aq_status)
+{
+	struct i40e_aq_desc desc;
+	enum i40e_status_code status;
+	u16 buf_size;
+
+	if (count == 0 || !mv_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+	status = i40e_asq_send_command_v2(hw, &desc, mv_list, buf_size,
+					  cmd_details, aq_status);
+
+	return status;
+}
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_remove_macvlan_element_data *mv_list,
 			u16 count, struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
@@ -3139,7 +3276,6 @@ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
 		(struct i40e_aqc_macvlan *)&desc.params.raw;
 	enum i40e_status_code status;
 	u16 buf_size;
-	int i;
 
 	if (count == 0 || !mv_list || !hw)
 		return I40E_ERR_PARAM;
@@ -3147,17 +3283,12 @@ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
 	buf_size = count * sizeof(*mv_list);
 
 	/* prep the rest of the request */
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
 	cmd->num_addresses = CPU_TO_LE16(count);
 	cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
 	cmd->seid[1] = 0;
 	cmd->seid[2] = 0;
 
-	for (i = 0; i < count; i++)
-		if (I40E_IS_MULTICAST(mv_list[i].mac_addr))
-			mv_list[i].flags |=
-			    CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
-
 	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
 	if (buf_size > I40E_AQ_LARGE_BUF)
 		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
@@ -3169,18 +3300,25 @@ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
 }
 
 /**
- * i40e_aq_remove_macvlan
+ * i40e_aq_remove_macvlan_v2
  * @hw: pointer to the hw struct
  * @seid: VSI for the mac address
  * @mv_list: list of macvlans to be removed
  * @count: length of the list
  * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
  *
- * Remove MAC/VLAN addresses from the HW filtering
+ * Remove MAC/VLAN addresses from the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
  **/
-enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
-			struct i40e_aqc_remove_macvlan_element_data *mv_list,
-			u16 count, struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+			  struct i40e_aqc_remove_macvlan_element_data *mv_list,
+			  u16 count, struct i40e_asq_cmd_details *cmd_details,
+			  enum i40e_admin_queue_err *aq_status)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_macvlan *cmd =
@@ -3204,8 +3342,8 @@ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
 	if (buf_size > I40E_AQ_LARGE_BUF)
 		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
-	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
-				       cmd_details);
+	status = i40e_asq_send_command_v2(hw, &desc, mv_list, buf_size,
+					  cmd_details, aq_status);
 
 	return status;
 }
@@ -5554,7 +5692,6 @@ STATIC enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
 				struct i40e_filter_control_settings *settings)
 {
 	u32 fcoe_cntx_size, fcoe_filt_size;
-	u32 pe_cntx_size, pe_filt_size;
 	u32 fcoe_fmax;
 
 	u32 val;
@@ -5599,8 +5736,6 @@ STATIC enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
 	case I40E_HASH_FILTER_SIZE_256K:
 	case I40E_HASH_FILTER_SIZE_512K:
 	case I40E_HASH_FILTER_SIZE_1M:
-		pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
-		pe_filt_size <<= (u32)settings->pe_filt_num;
 		break;
 	default:
 		return I40E_ERR_PARAM;
@@ -5617,8 +5752,6 @@ STATIC enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
 	case I40E_DMA_CNTX_SIZE_64K:
 	case I40E_DMA_CNTX_SIZE_128K:
 	case I40E_DMA_CNTX_SIZE_256K:
-		pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
-		pe_cntx_size <<= (u32)settings->pe_cntx_num;
 		break;
 	default:
 		return I40E_ERR_PARAM;
@@ -6803,6 +6936,7 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
 	case I40E_DEV_ID_10G_BASE_T4:
 	case I40E_DEV_ID_10G_BASE_T_BC:
 	case I40E_DEV_ID_5G_BASE_T_BC:
+	case I40E_DEV_ID_1G_BASE_T_BC:
 	case I40E_DEV_ID_10G_BASE_T_X722:
 	case I40E_DEV_ID_25G_B:
 	case I40E_DEV_ID_25G_SFP28:
@@ -6839,7 +6973,9 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
 		break;
 	case I40E_DEV_ID_10G_BASE_T:
 	case I40E_DEV_ID_10G_BASE_T4:
+	case I40E_DEV_ID_10G_BASE_T_BC:
 	case I40E_DEV_ID_5G_BASE_T_BC:
+	case I40E_DEV_ID_1G_BASE_T_BC:
 	case I40E_DEV_ID_10G_BASE_T_X722:
 	case I40E_DEV_ID_25G_B:
 	case I40E_DEV_ID_25G_SFP28:
@@ -8091,7 +8227,8 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 	u32 sec_off;
 	u32 i;
 
-	if (track_id == I40E_DDP_TRACKID_INVALID) {
+	if (track_id == I40E_DDP_TRACKID_INVALID ||
+	    track_id == I40E_DDP_TRACKID_RDONLY) {
 		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
 		return I40E_NOT_SUPPORTED;
 	}
diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c
index 46add19c9f..8f9b7e823f 100644
--- a/drivers/net/i40e/base/i40e_dcb.c
+++ b/drivers/net/i40e/base/i40e_dcb.c
@@ -315,9 +315,15 @@ static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
 	 *        |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
 	 *        ---------------------------------
 	 */
-	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		etscfg->tcbwtable[i] = buf[offset++];
 
+		if (etscfg->prioritytable[i] == I40E_CEE_PGID_STRICT)
+			dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+		else
+			dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+	}
+
 	/* Number of TCs supported (1 octet) */
 	etscfg->maxtcs = buf[offset];
 }
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index 02ae7be550..be261423b8 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -26,6 +26,7 @@
 #define I40E_DEV_ID_XXV710_N3000	0x0D58
 #define I40E_DEV_ID_10G_BASE_T_BC	0x15FF
 #define I40E_DEV_ID_5G_BASE_T_BC	0x101F
+#define I40E_DEV_ID_1G_BASE_T_BC	0x0DD2
 #if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
 #define I40E_DEV_ID_VF			0x154C
 #define I40E_DEV_ID_VF_HV		0x1571
@@ -48,6 +49,7 @@
 #define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
 #define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
 #define I40E_DEV_ID_SFP_I_X722		0x37D3
+#define I40E_DEV_ID_SFP_X722_A		0x0DDA
 #if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
 #define I40E_DEV_ID_X722_VF		0x37CD
 #endif /* VF_DRIVER */
diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h
index c9287ff255..437fb03f4d 100644
--- a/drivers/net/i40e/base/i40e_osdep.h
+++ b/drivers/net/i40e/base/i40e_osdep.h
@@ -165,7 +165,7 @@ static inline uint64_t i40e_read64_addr(volatile void *addr)
 	I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((a), (reg)), (value))
 #define flush(a) i40e_read_addr(I40E_PCI_REG_ADDR((a), (I40E_GLGEN_STAT)))
 
-#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
 
 /* memory allocation tracking */
 struct i40e_dma_mem {
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index 124222e476..8c21ac71ab 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -38,6 +38,13 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
 				void *buff, /* can be NULL */
 				u16  buff_size,
 				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_asq_send_command_v2(struct i40e_hw *hw,
+			 struct i40e_aq_desc *desc,
+			 void *buff, /* can be NULL */
+			 u16  buff_size,
+			 struct i40e_asq_cmd_details *cmd_details,
+			 enum i40e_admin_queue_err *aq_status);
 #ifdef VF_DRIVER
 bool i40e_asq_done(struct i40e_hw *hw);
 #endif
@@ -66,6 +73,7 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
 #ifdef PF_DRIVER
 
 u32 i40e_led_get(struct i40e_hw *hw);
+bool i40e_led_get_blink(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
 enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
 				       u16 led_addr, u32 mode);
@@ -188,9 +196,19 @@ enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
 enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
 			struct i40e_aqc_add_macvlan_element_data *mv_list,
 			u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+		       struct i40e_aqc_add_macvlan_element_data *mv_list,
+		       u16 count, struct i40e_asq_cmd_details *cmd_details,
+		       enum i40e_admin_queue_err *aq_status);
 enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
 			struct i40e_aqc_remove_macvlan_element_data *mv_list,
 			u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+			  struct i40e_aqc_remove_macvlan_element_data *mv_list,
+			  u16 count, struct i40e_asq_cmd_details *cmd_details,
+			  enum i40e_admin_queue_err *aq_status);
 enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
 			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
 			struct i40e_asq_cmd_details *cmd_details,
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index ee4f333f9c..651b0230f7 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -1411,6 +1411,11 @@
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+/* _i=0...3 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_LINK_STATUS1(_i) (0x0008C200 + ((_i) * 4))
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_SPEED_SHIFT 24
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_SPEED_MASK \
+	I40E_MASK(0x7, I40E_PRTMAC_PCS_LINK_STATUS1_LINK_SPEED_SHIFT)
 #define I40E_GL_FWRESETCNT                  0x00083100 /* Reset: POR */
 #define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
 #define I40E_GL_FWRESETCNT_FWRESETCNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
@@ -2390,10 +2395,14 @@
 #define I40E_GL_FCOERPDC_MAX_INDEX      143
 #define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
 #define I40E_GL_FCOERPDC_FCOERPDC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GL_RXERR1_L(_i)             (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR1_L_MAX_INDEX       143
-#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
-#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR1H(_i)             (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1H_MAX_INDEX       143
+#define I40E_GL_RXERR1H_RXERR1H_SHIFT   0
+#define I40E_GL_RXERR1H_RXERR1H_MASK    I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT)
+#define I40E_GL_RXERR1L(_i)             (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1L_MAX_INDEX       143
+#define I40E_GL_RXERR1L_RXERR1L_SHIFT   0
+#define I40E_GL_RXERR1L_RXERR1L_MASK    I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT)
 #define I40E_GL_RXERR2_L(_i)             (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_RXERR2_L_MAX_INDEX       143
 #define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
@@ -3620,27 +3629,6 @@
 #define I40E_GLHMC_PETIMEROBJSZ                      0x000C2080 /* Reset: CORER */
 #define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
 #define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFBASE(_i)               (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFBASE_MAX_INDEX         15
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_PEXFCNT(_i)              (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFCNT_MAX_INDEX        15
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLBASE(_i)                 (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX           15
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_PEXFFLMAX                   0x000C204c /* Reset: CORER */
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK  I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
-#define I40E_GLHMC_PEXFMAX                 0x000C2048 /* Reset: CORER */
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK  I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ                   0x000C2044 /* Reset: CORER */
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
 #define I40E_GLHMC_PFPESDPART(_i)            (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_PFPESDPART_MAX_INDEX      15
 #define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
@@ -3761,18 +3749,6 @@
 #define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX           31
 #define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
 #define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFBASE(_i)               (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX         31
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFCNT(_i)              (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX        31
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFFLBASE(_i)                 (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX           31
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
 #define I40E_GLHMC_VFSDPART(_i)            (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
 #define I40E_GLHMC_VFSDPART_MAX_INDEX      31
 #define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
@@ -3873,6 +3849,11 @@
 #define I40E_PRTMAC_LINK_DOWN_COUNTER                         0x001E2440 /* Reset: GLOBR */
 #define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
 #define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK  I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+/* _i=0...3 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_LINKSTA(_i) (0x001E2420 + ((_i) * 4))
+#define I40E_PRTMAC_LINKSTA_MAC_LINK_SPEED_SHIFT 27
+#define I40E_PRTMAC_LINKSTA_MAC_LINK_SPEED_MASK \
+	I40E_MASK(0x7, I40E_PRTMAC_LINKSTA_MAC_LINK_SPEED_SHIFT)
 #define I40E_GLNVM_AL_REQ                        0x000B6164 /* Reset: POR */
 #define I40E_GLNVM_AL_REQ_POR_SHIFT              0
 #define I40E_GLNVM_AL_REQ_POR_MASK               I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
diff --git a/drivers/net/i40e/base/i40e_status.h b/drivers/net/i40e/base/i40e_status.h
index cd72169f14..89b05ede3e 100644
--- a/drivers/net/i40e/base/i40e_status.h
+++ b/drivers/net/i40e/base/i40e_status.h
@@ -19,7 +19,7 @@ enum i40e_status_code {
 	I40E_ERR_ADAPTER_STOPPED		= -9,
 	I40E_ERR_INVALID_MAC_ADDR		= -10,
 	I40E_ERR_DEVICE_NOT_SUPPORTED		= -11,
-	I40E_ERR_MASTER_REQUESTS_PENDING	= -12,
+	I40E_ERR_PRIMARY_REQUESTS_PENDING	= -12,
 	I40E_ERR_INVALID_LINK_SETTINGS		= -13,
 	I40E_ERR_AUTONEG_NOT_COMPLETE		= -14,
 	I40E_ERR_RESET_FAILED			= -15,
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index 4674715ed7..3cfb0ca430 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -236,6 +236,14 @@ enum i40e_queue_type {
 	I40E_QUEUE_TYPE_UNKNOWN
 };
 
+enum i40e_prt_mac_link_speed {
+	I40E_PRT_MAC_LINK_SPEED_100MB = 0,
+	I40E_PRT_MAC_LINK_SPEED_1GB,
+	I40E_PRT_MAC_LINK_SPEED_10GB,
+	I40E_PRT_MAC_LINK_SPEED_40GB,
+	I40E_PRT_MAC_LINK_SPEED_20GB
+};
+
 struct i40e_link_status {
 	enum i40e_aq_phy_type phy_type;
 	enum i40e_aq_link_speed link_speed;
@@ -809,7 +817,7 @@ union i40e_32byte_rx_desc {
 		__le64  rsvd2;
 	} read;
 	struct {
-		struct {
+		struct i40e_32b_rx_wb_qw0 {
 			struct {
 				union {
 					__le16 mirroring_status;
@@ -847,6 +855,9 @@ union i40e_32byte_rx_desc {
 			} hi_dword;
 		} qword3;
 	} wb;  /* writeback */
+	struct {
+		u64 qword[4];
+	} raw;
 };
 
 #define I40E_RXD_QW0_MIRROR_STATUS_SHIFT	8
@@ -1417,6 +1428,7 @@ struct i40e_eth_stats {
 	u64 tx_broadcast;		/* bptc */
 	u64 tx_discards;		/* tdpc */
 	u64 tx_errors;			/* tepc */
+	u64 rx_discards_other;          /* rxerr1 */
 };
 
 /* Statistics collected per VEB per TC */
@@ -1551,6 +1563,9 @@ struct i40e_hw_port_stats {
 #define I40E_SR_FEATURE_CONFIGURATION_PTR	0x49
 #define I40E_SR_CONFIGURATION_METADATA_PTR	0x4D
 #define I40E_SR_IMMEDIATE_VALUES_PTR		0x4E
+#define I40E_SR_PRESERVATION_RULES_PTR		0x70
+#define I40E_X722_SR_5TH_FREE_PROVISION_AREA_PTR	0x71
+#define I40E_SR_6TH_FREE_PROVISION_AREA_PTR	0x71
 
 /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
 #define I40E_SR_VPD_MODULE_MAX_SIZE		1024
@@ -1908,6 +1923,10 @@ struct i40e_lldp_variables {
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_512	0x00010000
 
 /* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_X722_L3_SRC_SHIFT		49
+#define I40E_X722_L3_SRC_MASK		(0x3ULL << I40E_X722_L3_SRC_SHIFT)
+#define I40E_X722_L3_DST_SHIFT		41
+#define I40E_X722_L3_DST_MASK		(0x3ULL << I40E_X722_L3_DST_SHIFT)
 #define I40E_L3_SRC_SHIFT		47
 #define I40E_L3_SRC_MASK		(0x3ULL << I40E_L3_SRC_SHIFT)
 #define I40E_L3_V6_SRC_SHIFT		43
@@ -1974,6 +1993,10 @@ struct i40e_metadata_segment {
 	struct i40e_ddp_version version;
 #define I40E_DDP_TRACKID_RDONLY		0
 #define I40E_DDP_TRACKID_INVALID	0xFFFFFFFF
+#define I40E_DDP_TRACKID_GRP_MSK	0x00FF0000
+#define I40E_DDP_TRACKID_GRP_COMP_ALL	0xFF
+#define I40E_DDP_TRACKID_PKGTYPE_MSK	0xFF000000
+#define I40E_DDP_TRACKID_PKGTYPE_RDONLY	0
 	u32 track_id;
 	char name[I40E_DDP_NAME_SIZE];
 };
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index ba4a4a9ddc..298044c828 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -397,6 +397,7 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
 				      struct rte_ether_addr *mac_addr);
 
 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
 
 static int i40e_ethertype_filter_convert(
 	const struct rte_eth_ethertype_filter *input,
@@ -1775,11 +1776,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	 */
 	i40e_add_tx_flow_control_drop_filter(pf);
 
-	/* Set the max frame size to 0x2600 by default,
-	 * in case other drivers changed the default value.
-	 */
-	i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
-
 	/* initialize mirror rule list */
 	TAILQ_INIT(&pf->mirror_list);
 
@@ -2434,6 +2430,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	uint32_t intr_vector = 0;
 	struct i40e_vsi *vsi;
 	uint16_t nb_rxq, nb_txq;
+	uint16_t max_frame_size;
 
 	hw->adapter_stopped = 0;
 
@@ -2575,6 +2572,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
 			    "please call hierarchy_commit() "
 			    "before starting the port");
 
+	max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
+	i40e_set_mac_max_frame(dev, max_frame_size);
+
 	return I40E_SUCCESS;
 
 tx_err:
@@ -2942,11 +2942,13 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
 	return i40e_phy_conf_link(hw, abilities, speed, false);
 }
 
+#define CHECK_INTERVAL             100  /* 100ms */
+#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
+
 static __rte_always_inline void
 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 {
 /* Link status registers and values*/
-#define I40E_PRTMAC_LINKSTA		0x001E2420
 #define I40E_REG_LINK_UP		0x40000080
 #define I40E_PRTMAC_MACC		0x001E24E0
 #define I40E_REG_MACC_25GB		0x00020000
@@ -2959,7 +2961,7 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 	uint32_t link_speed;
 	uint32_t reg_val;
 
-	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
+	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA(0));
 	link_speed = reg_val & I40E_REG_SPEED_MASK;
 	reg_val &= I40E_REG_LINK_UP;
 	link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
@@ -3009,8 +3011,6 @@ static __rte_always_inline void
 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 	bool enable_lse, int wait_to_complete)
 {
-#define CHECK_INTERVAL             100  /* 100ms */
-#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
 	uint32_t rep_cnt = MAX_REPEAT_TIME;
 	struct i40e_link_status link_status;
 	int status;
@@ -3297,7 +3297,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
 			    &os->eth.rx_unknown_protocol,
 			    &ns->eth.rx_unknown_protocol);
 	i40e_stat_update_48(hw, I40E_GL_RXERR1_H(hw->pf_id + I40E_MAX_VF),
-			    I40E_GL_RXERR1_L(hw->pf_id + I40E_MAX_VF),
+			    I40E_GL_RXERR1L(hw->pf_id + I40E_MAX_VF),
 			    pf->offset_loaded, &pf->rx_err1_offset,
 			    &pf->rx_err1);
 	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
@@ -6813,6 +6813,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 			if (!ret)
 				rte_eth_dev_callback_process(dev,
 					RTE_ETH_EVENT_INTR_LSC, NULL);
+
 			break;
 		default:
 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
@@ -7104,6 +7105,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
 	int ret = I40E_SUCCESS;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
 	struct i40e_aqc_remove_macvlan_element_data *req_list;
+	enum i40e_admin_queue_err aq_status;
 
 	if (filter == NULL  || total == 0)
 		return I40E_ERR_PARAM;
@@ -7151,11 +7153,19 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
 			req_list[i].flags = rte_cpu_to_le_16(flags);
 		}
 
-		ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
-						actual_num, NULL);
+		ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, req_list,
+						actual_num, NULL, &aq_status);
+
 		if (ret != I40E_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
-			goto DONE;
+			/* Do not report as an error
+			 * when firmware returns ENOENT
+			 */
+			if (aq_status == I40E_AQ_RC_ENOENT) {
+				ret = I40E_SUCCESS;
+			} else {
+				PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
+				goto DONE;
+			}
 		}
 		num += actual_num;
 	} while (num < total);
@@ -12585,6 +12595,31 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	return ret;
 }
 
+static void
+i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t rep_cnt = MAX_REPEAT_TIME;
+	struct rte_eth_link link;
+	enum i40e_status_code status;
+
+	do {
+		update_link_reg(hw, &link);
+		if (link.link_status)
+			break;
+
+		rte_delay_ms(CHECK_INTERVAL);
+	} while (--rep_cnt);
+
+	if (link.link_status) {
+		status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
+		if (status != I40E_SUCCESS)
+			PMD_DRV_LOG(ERR, "Failed to set max frame size at port level");
+	} else {
+		PMD_DRV_LOG(ERR, "Set max frame size at port level not applicable on link down");
+	}
+}
+
 int
 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
 		   const struct rte_flow_action_rss *in)
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index b56f9f9149..e6adbb425e 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1153,10 +1153,6 @@ i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
 }
 
 static const struct rte_pci_id pci_id_i40evf_map[] = {
-	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
-	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
-	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
-	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
@@ -1236,7 +1232,7 @@ i40evf_reset_vf(struct rte_eth_dev *dev)
 	  * it to ACTIVE. In this duration, vf may not catch the moment that
 	  * COMPLETE is set. So, for vf, we'll try to wait a long time.
 	  */
-	rte_delay_ms(200);
+	rte_delay_ms(500);
 
 	ret = i40evf_check_vf_reset_done(dev);
 	if (ret) {
diff --git a/drivers/net/i40e/i40e_regs.h b/drivers/net/i40e/i40e_regs.h
index b19bb1d5a5..cf62c9dfb7 100644
--- a/drivers/net/i40e/i40e_regs.h
+++ b/drivers/net/i40e/i40e_regs.h
@@ -586,9 +586,6 @@ static const struct i40e_reg_info i40e_regs_others[] = {
 	{I40E_GLHMC_PEARPMAX, 0, 0, 0, 0, "GLHMC_PEARPMAX"},
 	{I40E_GLHMC_PEMROBJSZ, 0, 0, 0, 0, "GLHMC_PEMROBJSZ"},
 	{I40E_GLHMC_PEMRMAX, 0, 0, 0, 0, "GLHMC_PEMRMAX"},
-	{I40E_GLHMC_PEXFOBJSZ, 0, 0, 0, 0, "GLHMC_PEXFOBJSZ"},
-	{I40E_GLHMC_PEXFMAX, 0, 0, 0, 0, "GLHMC_PEXFMAX"},
-	{I40E_GLHMC_PEXFFLMAX, 0, 0, 0, 0, "GLHMC_PEXFFLMAX"},
 	{I40E_GLHMC_PEQ1OBJSZ, 0, 0, 0, 0, "GLHMC_PEQ1OBJSZ"},
 	{I40E_GLHMC_PEQ1MAX, 0, 0, 0, 0, "GLHMC_PEQ1MAX"},
 	{I40E_GLHMC_PEQ1FLMAX, 0, 0, 0, 0, "GLHMC_PEQ1FLMAX"},
@@ -616,9 +613,6 @@ static const struct i40e_reg_info i40e_regs_others[] = {
 	{I40E_GLHMC_APBVTINUSEBASE(0), 15, 4, 0, 0, "GLHMC_APBVTINUSEBASE"},
 	{I40E_GLHMC_PEMRBASE(0), 15, 4, 0, 0, "GLHMC_PEMRBASE"},
 	{I40E_GLHMC_PEMRCNT(0), 15, 4, 0, 0, "GLHMC_PEMRCNT"},
-	{I40E_GLHMC_PEXFBASE(0), 15, 4, 0, 0, "GLHMC_PEXFBASE"},
-	{I40E_GLHMC_PEXFCNT(0), 15, 4, 0, 0, "GLHMC_PEXFCNT"},
-	{I40E_GLHMC_PEXFFLBASE(0), 15, 4, 0, 0, "GLHMC_PEXFFLBASE"},
 	{I40E_GLHMC_PEQ1BASE(0), 15, 4, 0, 0, "GLHMC_PEQ1BASE"},
 	{I40E_GLHMC_PEQ1CNT(0), 15, 4, 0, 0, "GLHMC_PEQ1CNT"},
 	{I40E_GLHMC_PEQ1FLBASE(0), 15, 4, 0, 0, "GLHMC_PEQ1FLBASE"},
@@ -653,9 +647,6 @@ static const struct i40e_reg_info i40e_regs_others[] = {
 	{I40E_GLHMC_VFAPBVTINUSEBASE(0), 31, 4, 0, 0, "GLHMC_VFAPBVTINUSEBASE"},
 	{I40E_GLHMC_VFPEMRBASE(0), 31, 4, 0, 0, "GLHMC_VFPEMRBASE"},
 	{I40E_GLHMC_VFPEMRCNT(0), 31, 4, 0, 0, "GLHMC_VFPEMRCNT"},
-	{I40E_GLHMC_VFPEXFBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFBASE"},
-	{I40E_GLHMC_VFPEXFCNT(0), 31, 4, 0, 0, "GLHMC_VFPEXFCNT"},
-	{I40E_GLHMC_VFPEXFFLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFFLBASE"},
 	{I40E_GLHMC_VFPEQ1BASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1BASE"},
 	{I40E_GLHMC_VFPEQ1CNT(0), 31, 4, 0, 0, "GLHMC_VFPEQ1CNT"},
 	{I40E_GLHMC_VFPEQ1FLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1FLBASE"},
@@ -896,7 +887,7 @@ static const struct i40e_reg_info i40e_regs_others[] = {
 	{I40E_GL_FCOEDDPC(0), 143, 8, 0, 0, "GL_FCOEDDPC"},
 	{I40E_GL_FCOECRC(0), 143, 8, 0, 0, "GL_FCOECRC"},
 	{I40E_GL_FCOEPRC(0), 143, 8, 0, 0, "GL_FCOEPRC"},
-	{I40E_GL_RXERR1_L(0), 143, 8, 0, 0, "GL_RXERR1_L"},
+	{I40E_GL_RXERR1L(0), 143, 8, 0, 0, "GL_RXERR1_L"},
 	{I40E_GL_FCOEDIFEC(0), 143, 8, 0, 0, "GL_FCOEDIFEC"},
 	{I40E_GL_RXERR2_L(0), 143, 8, 0, 0, "GL_RXERR2_L"},
 	{I40E_GL_FCOEDWRCL(0), 143, 8, 0, 0, "GL_FCOEDWRCL"},
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index b26bd0640d..91ccc345f0 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -449,7 +449,11 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
 					I40E_RXD_QW1_STATUS_SHIFT;
 		}
 
-		rte_smp_rmb();
+		/**
+		 * This barrier is to order loads of different words
+		 *  in the descriptor.
+		 */
+		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
 
 		/* Compute how many status bits were set */
 		for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 168e4fef02..01cfc44a66 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -127,6 +127,10 @@ static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 
 static const struct rte_pci_id pci_id_iavf_map[] = {
 	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
@@ -1288,6 +1292,9 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	ret = iavf_query_stats(adapter, &pstats);
 	if (ret == 0) {
+		uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
+					 DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+					 RTE_ETHER_CRC_LEN;
 		iavf_update_stats(vsi, pstats);
 		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
 				pstats->rx_broadcast - pstats->rx_discards;
@@ -1296,7 +1303,7 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		stats->imissed = pstats->rx_discards;
 		stats->oerrors = pstats->tx_errors + pstats->tx_discards;
 		stats->ibytes = pstats->rx_bytes;
-		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
+		stats->ibytes -= stats->ipackets * crc_stats_len;
 		stats->obytes = pstats->tx_bytes;
 	} else {
 		PMD_DRV_LOG(ERR, "Get statistics failed");
@@ -1864,6 +1871,9 @@ iavf_init_vf(struct rte_eth_dev *dev)
 		}
 	}
 
+	if (vf->vsi_res->num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT)
+		vf->lv_enabled = true;
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
 		if (iavf_get_supported_rxdid(adapter) != 0) {
 			PMD_INIT_LOG(ERR, "failed to do get supported rxdid");
@@ -1888,6 +1898,27 @@ iavf_init_vf(struct rte_eth_dev *dev)
 	return -1;
 }
 
+static void
+iavf_uninit_vf(struct rte_eth_dev *dev)
+{
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_shutdown_adminq(hw);
+
+	rte_free(vf->vf_res);
+	vf->vsi_res = NULL;
+	vf->vf_res = NULL;
+
+	rte_free(vf->aq_resp);
+	vf->aq_resp = NULL;
+
+	rte_free(vf->rss_lut);
+	vf->rss_lut = NULL;
+	rte_free(vf->rss_key);
+	vf->rss_key = NULL;
+}
+
 /* Enable default admin queue interrupt setting */
 static inline void
 iavf_enable_irq0(struct iavf_hw *hw)
@@ -2013,7 +2044,8 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
 			     " store MAC addresses",
 			     RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto init_vf_err;
 	}
 	/* If the MAC address is not configured by host,
 	 * generate a random one.
@@ -2038,10 +2070,19 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 	ret = iavf_flow_init(adapter);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to initialize flow");
-		return ret;
+		goto flow_init_err;
 	}
 
 	return 0;
+
+flow_init_err:
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+
+init_vf_err:
+	iavf_uninit_vf(eth_dev);
+
+	return ret;
 }
 
 static int
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 555551008b..262d366461 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -439,48 +439,53 @@ iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
 #endif
 }
 
+static const
+iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[IAVF_RXDID_LAST + 1] = {
+	[IAVF_RXDID_LEGACY_0] = iavf_rxd_to_pkt_fields_by_comms_ovs,
+	[IAVF_RXDID_LEGACY_1] = iavf_rxd_to_pkt_fields_by_comms_ovs,
+	[IAVF_RXDID_COMMS_AUX_VLAN] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+	[IAVF_RXDID_COMMS_AUX_IPV4] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+	[IAVF_RXDID_COMMS_AUX_IPV6] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+	[IAVF_RXDID_COMMS_AUX_IPV6_FLOW] =
+		iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+	[IAVF_RXDID_COMMS_AUX_TCP] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+	[IAVF_RXDID_COMMS_AUX_IP_OFFSET] =
+		iavf_rxd_to_pkt_fields_by_comms_aux_v2,
+	[IAVF_RXDID_COMMS_OVS_1] = iavf_rxd_to_pkt_fields_by_comms_ovs,
+};
+
 static void
 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 {
+	rxq->rxdid = rxdid;
+
 	switch (rxdid) {
 	case IAVF_RXDID_COMMS_AUX_VLAN:
 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 	case IAVF_RXDID_COMMS_AUX_IPV4:
 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 	case IAVF_RXDID_COMMS_AUX_IPV6:
 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 	case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
 		rxq->xtr_ol_flag =
 			rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 	case IAVF_RXDID_COMMS_AUX_TCP:
 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 	case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
 		rxq->xtr_ol_flag =
 			rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
 	case IAVF_RXDID_COMMS_OVS_1:
-		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
+	case IAVF_RXDID_LEGACY_0:
+	case IAVF_RXDID_LEGACY_1:
 		break;
 	default:
-		/* update this according to the RXDID for FLEX_DESC_NONE */
-		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
+		rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
 		break;
 	}
 
@@ -506,9 +511,12 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	uint8_t proto_xtr;
 	uint16_t len;
 	uint16_t rx_free_thresh;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
 	    nb_desc > IAVF_MAX_RING_DESC ||
 	    nb_desc < IAVF_MIN_RING_DESC) {
@@ -562,6 +570,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 	rxq->rx_hdr_len = 0;
 	rxq->vsi = vsi;
+	rxq->offloads = offloads;
 
 	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 	rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
@@ -1105,7 +1114,7 @@ iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
 			   rxq->port_id, rxq->queue_id, rx_id, nb_hold);
 		rx_id = (uint16_t)((rx_id == 0) ?
 			(rxq->nb_rx_desc - 1) : (rx_id - 1));
-		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+		IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
 		nb_hold = 0;
 	}
 	rxq->nb_rx_hold = nb_hold;
@@ -1304,7 +1313,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
-		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
 
@@ -1446,7 +1455,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
-		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
+		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
 		first_seg->ol_flags |= pkt_flags;
@@ -1637,7 +1646,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 	struct rte_mbuf *mb;
 	uint16_t stat_err0;
 	uint16_t pkt_len;
-	int32_t s[IAVF_LOOK_AHEAD], nb_dd;
+	int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
 	int32_t i, j, nb_rx = 0;
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
@@ -1662,9 +1671,27 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 		rte_smp_rmb();
 
-		/* Compute how many status bits were set */
-		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
-			nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
+		/* Compute how many contiguous DD bits were set */
+		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
+			var = s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
+#ifdef RTE_ARCH_ARM
+			/* For Arm platforms, count only contiguous descriptors
+			 * whose DD bit is set to 1. On Arm platforms, reads of
+			 * descriptors can be reordered. Since the CPU may
+			 * be reading the descriptors as the NIC updates them
+			 * in memory, it is possible that the DD bit for a
+			 * descriptor earlier in the queue is read as not set
+			 * while the DD bit for a descriptor later in the queue
+			 * is read as set.
+			 */
+			if (var)
+				nb_dd += 1;
+			else
+				break;
+#else
+			nb_dd += var;
+#endif
+		}
 
 		nb_rx += nb_dd;
 
@@ -1684,7 +1711,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
-			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+			rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
 
@@ -1714,7 +1741,7 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
 	uint16_t pkt_len;
 	uint64_t qword1;
 	uint32_t rx_status;
-	int32_t s[IAVF_LOOK_AHEAD], nb_dd;
+	int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
 	int32_t i, j, nb_rx = 0;
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
@@ -1745,9 +1772,27 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
 
 		rte_smp_rmb();
 
-		/* Compute how many status bits were set */
-		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
-			nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
+		/* Compute how many contiguous DD bits were set */
+		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
+			var = s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
+#ifdef RTE_ARCH_ARM
+			/* For Arm platforms, count only contiguous descriptors
+			 * whose DD bit is set to 1. On Arm platforms, reads of
+			 * descriptors can be reordered. Since the CPU may
+			 * be reading the descriptors as the NIC updates them
+			 * in memory, it is possible that the DD bit for a
+			 * descriptor earlier in the queue is read as not set
+			 * while the DD bit for a descriptor later in the queue
+			 * is read as set.
+			 */
+			if (var)
+				nb_dd += 1;
+			else
+				break;
+#else
+			nb_dd += var;
+#endif
+		}
 
 		nb_rx += nb_dd;
 
@@ -1854,7 +1899,7 @@ iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
 
 	/* Update rx tail register */
 	rte_wmb();
-	IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+	IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
 
 	rxq->rx_free_trigger =
 		(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
@@ -2267,7 +2312,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
 		   txq->port_id, txq->queue_id, tx_id, nb_tx);
 
-	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
 	txq->tx_tail = tx_id;
 
 	return nb_tx;
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index decfe3ad4c..2d48b65922 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -192,9 +192,8 @@ struct iavf_rx_queue {
 	const struct iavf_rxq_ops *ops;
 	uint8_t proto_xtr; /* protocol extraction type */
 	uint64_t xtr_ol_flag;
-		/* flexible descriptor metadata extraction offload flag */
-	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
-				/* handle flexible descriptor by RXDID */
+	/* flexible descriptor metadata extraction offload flag */
+	uint64_t offloads;
 };
 
 struct iavf_tx_entry {
@@ -349,41 +348,6 @@ enum iavf_rxdid {
 	IAVF_RXDID_LAST			= 63,
 };
 
-enum iavf_rx_flex_desc_status_error_0_bits {
-	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,
-	IAVF_RX_FLEX_DESC_STATUS0_EOF_S,
-	IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
-	IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
-	IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
-	IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
-	IAVF_RX_FLEX_DESC_STATUS0_RXE_S,
-	IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
-	IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
-	IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
-	IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
-};
-
-enum iavf_rx_flex_desc_status_error_1_bits {
-	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
-	/* [10:6] reserved */
-	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
-	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
-	IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
-	IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
-	IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
-	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
-};
-
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index a006d90a24..c20e9ccaa9 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -1399,7 +1399,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	txq->tx_tail = tx_id;
 
-	IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	IAVF_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
 
 	return nb_pkts;
 }
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index f61681474c..5a84c42cf9 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -140,7 +140,7 @@ iavf_rxq_rearm(struct iavf_rx_queue *rxq)
 			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
 
 	/* Update the tail pointer on the NIC */
-	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+	IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
 }
 
 #define IAVF_RX_LEN_MASK 0x80808080
@@ -1654,7 +1654,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	txq->tx_tail = tx_id;
 
-	IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	IAVF_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
 
 	return nb_pkts;
 }
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 7629474508..3dfd921df9 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -475,7 +475,7 @@ iavf_rxq_rearm_common(struct iavf_rx_queue *rxq, __rte_unused bool avx512)
 			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
 
 	/* Update the tail pointer on the NIC */
-	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+	IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
 }
 #endif
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 3298d71317..572078c7cd 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -88,7 +88,7 @@ iavf_rxq_rearm(struct iavf_rx_queue *rxq)
 		   rx_id, rxq->rxrearm_start, rxq->rxrearm_nb);
 
 	/* Update the tail pointer on the NIC */
-	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+	IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
 }
 
 static inline void
@@ -1172,7 +1172,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_pkts=%u",
 		   txq->port_id, txq->queue_id, tx_id, nb_pkts);
 
-	IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	IAVF_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
 
 	return nb_pkts;
 }
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 1460330572..156ccd21e4 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -157,7 +157,7 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (_atomic_set_cmd(vf, args->ops))
 		return -1;
 
-	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
+	ret = iavf_aq_send_msg_to_pf(hw, args->ops, VIRTCHNL_STATUS_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
@@ -463,7 +463,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
 		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
 		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
-		VIRTCHNL_VF_LARGE_NUM_QPAIRS;
+		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index e371b3dc67..db24c554f7 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -40,7 +40,7 @@ static __rte_always_inline int
 ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
 			    uint8_t *req_msg, uint16_t req_msglen)
 {
-	return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS,
+	return iavf_aq_send_msg_to_pf(&hw->avf, op, VIRTCHNL_STATUS_SUCCESS,
 				      req_msg, req_msglen, NULL);
 }
 
@@ -105,7 +105,7 @@ static __rte_always_inline int
 ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
 {
 	return iavf_aq_send_msg_to_pf(&hw->avf,
-				      cmd->v_op, IAVF_SUCCESS,
+				      cmd->v_op, VIRTCHNL_STATUS_SUCCESS,
 				      cmd->req_msg, cmd->req_msglen, NULL);
 }
 
-- 
2.27.0


^ permalink raw reply	[flat|nested] 2+ messages in thread

* [RFC v2] net/i40e: backport i40e fixes and share code to 20.11.4
  2022-03-28  8:04 [RFC] net/i40e: backport i40e fixes and share code to 20.11.4 Steve Yang
@ 2022-04-07  6:11 ` Steve Yang
  0 siblings, 0 replies; 2+ messages in thread
From: Steve Yang @ 2022-04-07  6:11 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Steve Yang

Backport all the i40e share code to 20.11.4 based on
cid-i40e.2022.03.08.

Backport all DPDK fixes of i40e and iavf to 20.11.4 from 22.03.

Signed-off-by: Steve Yang <stevex.yang@intel.com>

---
v2: add new device id for i40e.
---
 drivers/common/iavf/README                  |   4 +-
 drivers/common/iavf/iavf_adminq.c           |   2 +-
 drivers/common/iavf/iavf_adminq.h           |   2 +-
 drivers/common/iavf/iavf_adminq_cmd.h       |   2 +-
 drivers/common/iavf/iavf_alloc.h            |   2 +-
 drivers/common/iavf/iavf_common.c           | 863 ++++++++++++++++-
 drivers/common/iavf/iavf_devids.h           |   3 +-
 drivers/common/iavf/iavf_impl.c             |   2 +-
 drivers/common/iavf/iavf_osdep.h            |   7 +-
 drivers/common/iavf/iavf_prototype.h        |   6 +-
 drivers/common/iavf/iavf_register.h         |   2 +-
 drivers/common/iavf/iavf_status.h           |   4 +-
 drivers/common/iavf/iavf_type.h             |  92 +-
 drivers/common/iavf/meson.build             |   2 +-
 drivers/common/iavf/siov_regs.h             |  47 +
 drivers/common/iavf/virtchnl.h              | 981 ++++++++++++++++++--
 drivers/common/iavf/virtchnl_inline_ipsec.h | 562 +++++++++++
 drivers/net/i40e/base/README                |   2 +-
 drivers/net/i40e/base/i40e_adminq.c         |  73 +-
 drivers/net/i40e/base/i40e_adminq_cmd.h     |  49 +-
 drivers/net/i40e/base/i40e_common.c         | 185 +++-
 drivers/net/i40e/base/i40e_dcb.c            |   8 +-
 drivers/net/i40e/base/i40e_devids.h         |   2 +
 drivers/net/i40e/base/i40e_osdep.h          |   2 +-
 drivers/net/i40e/base/i40e_prototype.h      |  18 +
 drivers/net/i40e/base/i40e_register.h       |  55 +-
 drivers/net/i40e/base/i40e_status.h         |   2 +-
 drivers/net/i40e/base/i40e_type.h           |  25 +-
 drivers/net/i40e/i40e_ethdev.c              |  66 +-
 drivers/net/i40e/i40e_ethdev_vf.c           |   6 +-
 drivers/net/i40e/i40e_regs.h                |  11 +-
 drivers/net/i40e/i40e_rxtx.c                |   6 +-
 drivers/net/iavf/iavf_ethdev.c              |  47 +-
 drivers/net/iavf/iavf_rxtx.c                | 103 +-
 drivers/net/iavf/iavf_rxtx.h                |  40 +-
 drivers/net/iavf/iavf_rxtx_vec_avx2.c       |   2 +-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c     |   4 +-
 drivers/net/iavf/iavf_rxtx_vec_common.h     |   2 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c        |   4 +-
 drivers/net/iavf/iavf_vchnl.c               |   5 +-
 drivers/net/ice/ice_dcf.c                   |   4 +-
 41 files changed, 3004 insertions(+), 300 deletions(-)
 create mode 100644 drivers/common/iavf/siov_regs.h
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/README b/drivers/common/iavf/README
index 5a42750465..f59839ff97 100644
--- a/drivers/common/iavf/README
+++ b/drivers/common/iavf/README
@@ -1,12 +1,12 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2020 Intel Corporation
+ * Copyright(c) 2019-2021 Intel Corporation
  */
 
 Intel® IAVF driver
 =================
 
 This directory contains source code of FreeBSD IAVF driver of version
-cid-avf.2020.10.14.tar.gz released by the team which develops
+cid-avf.2022.03.08.tar.gz released by the team which develops
 basic drivers for any IAVF NIC. The directory of base/ contains the
 original source package.
 
diff --git a/drivers/common/iavf/iavf_adminq.c b/drivers/common/iavf/iavf_adminq.c
index 8d03de0553..9c36e8908e 100644
--- a/drivers/common/iavf/iavf_adminq.c
+++ b/drivers/common/iavf/iavf_adminq.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #include "iavf_status.h"
diff --git a/drivers/common/iavf/iavf_adminq.h b/drivers/common/iavf/iavf_adminq.h
index 93214162eb..e2374f9b95 100644
--- a/drivers/common/iavf/iavf_adminq.h
+++ b/drivers/common/iavf/iavf_adminq.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_ADMINQ_H_
diff --git a/drivers/common/iavf/iavf_adminq_cmd.h b/drivers/common/iavf/iavf_adminq_cmd.h
index 5b748426ad..2a3006a526 100644
--- a/drivers/common/iavf/iavf_adminq_cmd.h
+++ b/drivers/common/iavf/iavf_adminq_cmd.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_ADMINQ_CMD_H_
diff --git a/drivers/common/iavf/iavf_alloc.h b/drivers/common/iavf/iavf_alloc.h
index 7b7a205cff..6ef8da65a1 100644
--- a/drivers/common/iavf/iavf_alloc.h
+++ b/drivers/common/iavf/iavf_alloc.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_ALLOC_H_
diff --git a/drivers/common/iavf/iavf_common.c b/drivers/common/iavf/iavf_common.c
index c951b7d787..03872dece8 100644
--- a/drivers/common/iavf/iavf_common.c
+++ b/drivers/common/iavf/iavf_common.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #include "iavf_type.h"
@@ -135,8 +135,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
 		return "IAVF_ERR_INVALID_MAC_ADDR";
 	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
 		return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
-	case IAVF_ERR_MASTER_REQUESTS_PENDING:
-		return "IAVF_ERR_MASTER_REQUESTS_PENDING";
+	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
+		return "IAVF_ERR_PRIMARY_REQUESTS_PENDING";
 	case IAVF_ERR_INVALID_LINK_SETTINGS:
 		return "IAVF_ERR_INVALID_LINK_SETTINGS";
 	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
@@ -520,9 +520,9 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
 	return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
 }
 
-/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
+/* The iavf_ptype_lookup table is used to convert from the 8-bit and 10-bit
+ * ptype in the hardware to a bit-field that can be used by SW to more easily
+ * determine the packet type.
  *
  * Macros are used to shorten the table lines and make this table human
  * readable.
@@ -882,7 +882,852 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
 	IAVF_PTT_UNUSED_ENTRY(252),
 	IAVF_PTT_UNUSED_ENTRY(253),
 	IAVF_PTT_UNUSED_ENTRY(254),
-	IAVF_PTT_UNUSED_ENTRY(255)
+	IAVF_PTT_UNUSED_ENTRY(255),
+	IAVF_PTT_UNUSED_ENTRY(256),
+	IAVF_PTT_UNUSED_ENTRY(257),
+	IAVF_PTT_UNUSED_ENTRY(258),
+	IAVF_PTT_UNUSED_ENTRY(259),
+
+	IAVF_PTT_UNUSED_ENTRY(260),
+	IAVF_PTT_UNUSED_ENTRY(261),
+	IAVF_PTT_UNUSED_ENTRY(262),
+	IAVF_PTT_UNUSED_ENTRY(263),
+	IAVF_PTT_UNUSED_ENTRY(264),
+	IAVF_PTT_UNUSED_ENTRY(265),
+	IAVF_PTT_UNUSED_ENTRY(266),
+	IAVF_PTT_UNUSED_ENTRY(267),
+	IAVF_PTT_UNUSED_ENTRY(268),
+	IAVF_PTT_UNUSED_ENTRY(269),
+
+	IAVF_PTT_UNUSED_ENTRY(270),
+	IAVF_PTT_UNUSED_ENTRY(271),
+	IAVF_PTT_UNUSED_ENTRY(272),
+	IAVF_PTT_UNUSED_ENTRY(273),
+	IAVF_PTT_UNUSED_ENTRY(274),
+	IAVF_PTT_UNUSED_ENTRY(275),
+	IAVF_PTT_UNUSED_ENTRY(276),
+	IAVF_PTT_UNUSED_ENTRY(277),
+	IAVF_PTT_UNUSED_ENTRY(278),
+	IAVF_PTT_UNUSED_ENTRY(279),
+
+	IAVF_PTT_UNUSED_ENTRY(280),
+	IAVF_PTT_UNUSED_ENTRY(281),
+	IAVF_PTT_UNUSED_ENTRY(282),
+	IAVF_PTT_UNUSED_ENTRY(283),
+	IAVF_PTT_UNUSED_ENTRY(284),
+	IAVF_PTT_UNUSED_ENTRY(285),
+	IAVF_PTT_UNUSED_ENTRY(286),
+	IAVF_PTT_UNUSED_ENTRY(287),
+	IAVF_PTT_UNUSED_ENTRY(288),
+	IAVF_PTT_UNUSED_ENTRY(289),
+
+	IAVF_PTT_UNUSED_ENTRY(290),
+	IAVF_PTT_UNUSED_ENTRY(291),
+	IAVF_PTT_UNUSED_ENTRY(292),
+	IAVF_PTT_UNUSED_ENTRY(293),
+	IAVF_PTT_UNUSED_ENTRY(294),
+	IAVF_PTT_UNUSED_ENTRY(295),
+	IAVF_PTT_UNUSED_ENTRY(296),
+	IAVF_PTT_UNUSED_ENTRY(297),
+	IAVF_PTT_UNUSED_ENTRY(298),
+	IAVF_PTT_UNUSED_ENTRY(299),
+
+	IAVF_PTT_UNUSED_ENTRY(300),
+	IAVF_PTT_UNUSED_ENTRY(301),
+	IAVF_PTT_UNUSED_ENTRY(302),
+	IAVF_PTT_UNUSED_ENTRY(303),
+	IAVF_PTT_UNUSED_ENTRY(304),
+	IAVF_PTT_UNUSED_ENTRY(305),
+	IAVF_PTT_UNUSED_ENTRY(306),
+	IAVF_PTT_UNUSED_ENTRY(307),
+	IAVF_PTT_UNUSED_ENTRY(308),
+	IAVF_PTT_UNUSED_ENTRY(309),
+
+	IAVF_PTT_UNUSED_ENTRY(310),
+	IAVF_PTT_UNUSED_ENTRY(311),
+	IAVF_PTT_UNUSED_ENTRY(312),
+	IAVF_PTT_UNUSED_ENTRY(313),
+	IAVF_PTT_UNUSED_ENTRY(314),
+	IAVF_PTT_UNUSED_ENTRY(315),
+	IAVF_PTT_UNUSED_ENTRY(316),
+	IAVF_PTT_UNUSED_ENTRY(317),
+	IAVF_PTT_UNUSED_ENTRY(318),
+	IAVF_PTT_UNUSED_ENTRY(319),
+
+	IAVF_PTT_UNUSED_ENTRY(320),
+	IAVF_PTT_UNUSED_ENTRY(321),
+	IAVF_PTT_UNUSED_ENTRY(322),
+	IAVF_PTT_UNUSED_ENTRY(323),
+	IAVF_PTT_UNUSED_ENTRY(324),
+	IAVF_PTT_UNUSED_ENTRY(325),
+	IAVF_PTT_UNUSED_ENTRY(326),
+	IAVF_PTT_UNUSED_ENTRY(327),
+	IAVF_PTT_UNUSED_ENTRY(328),
+	IAVF_PTT_UNUSED_ENTRY(329),
+
+	IAVF_PTT_UNUSED_ENTRY(330),
+	IAVF_PTT_UNUSED_ENTRY(331),
+	IAVF_PTT_UNUSED_ENTRY(332),
+	IAVF_PTT_UNUSED_ENTRY(333),
+	IAVF_PTT_UNUSED_ENTRY(334),
+	IAVF_PTT_UNUSED_ENTRY(335),
+	IAVF_PTT_UNUSED_ENTRY(336),
+	IAVF_PTT_UNUSED_ENTRY(337),
+	IAVF_PTT_UNUSED_ENTRY(338),
+	IAVF_PTT_UNUSED_ENTRY(339),
+
+	IAVF_PTT_UNUSED_ENTRY(340),
+	IAVF_PTT_UNUSED_ENTRY(341),
+	IAVF_PTT_UNUSED_ENTRY(342),
+	IAVF_PTT_UNUSED_ENTRY(343),
+	IAVF_PTT_UNUSED_ENTRY(344),
+	IAVF_PTT_UNUSED_ENTRY(345),
+	IAVF_PTT_UNUSED_ENTRY(346),
+	IAVF_PTT_UNUSED_ENTRY(347),
+	IAVF_PTT_UNUSED_ENTRY(348),
+	IAVF_PTT_UNUSED_ENTRY(349),
+
+	IAVF_PTT_UNUSED_ENTRY(350),
+	IAVF_PTT_UNUSED_ENTRY(351),
+	IAVF_PTT_UNUSED_ENTRY(352),
+	IAVF_PTT_UNUSED_ENTRY(353),
+	IAVF_PTT_UNUSED_ENTRY(354),
+	IAVF_PTT_UNUSED_ENTRY(355),
+	IAVF_PTT_UNUSED_ENTRY(356),
+	IAVF_PTT_UNUSED_ENTRY(357),
+	IAVF_PTT_UNUSED_ENTRY(358),
+	IAVF_PTT_UNUSED_ENTRY(359),
+
+	IAVF_PTT_UNUSED_ENTRY(360),
+	IAVF_PTT_UNUSED_ENTRY(361),
+	IAVF_PTT_UNUSED_ENTRY(362),
+	IAVF_PTT_UNUSED_ENTRY(363),
+	IAVF_PTT_UNUSED_ENTRY(364),
+	IAVF_PTT_UNUSED_ENTRY(365),
+	IAVF_PTT_UNUSED_ENTRY(366),
+	IAVF_PTT_UNUSED_ENTRY(367),
+	IAVF_PTT_UNUSED_ENTRY(368),
+	IAVF_PTT_UNUSED_ENTRY(369),
+
+	IAVF_PTT_UNUSED_ENTRY(370),
+	IAVF_PTT_UNUSED_ENTRY(371),
+	IAVF_PTT_UNUSED_ENTRY(372),
+	IAVF_PTT_UNUSED_ENTRY(373),
+	IAVF_PTT_UNUSED_ENTRY(374),
+	IAVF_PTT_UNUSED_ENTRY(375),
+	IAVF_PTT_UNUSED_ENTRY(376),
+	IAVF_PTT_UNUSED_ENTRY(377),
+	IAVF_PTT_UNUSED_ENTRY(378),
+	IAVF_PTT_UNUSED_ENTRY(379),
+
+	IAVF_PTT_UNUSED_ENTRY(380),
+	IAVF_PTT_UNUSED_ENTRY(381),
+	IAVF_PTT_UNUSED_ENTRY(382),
+	IAVF_PTT_UNUSED_ENTRY(383),
+	IAVF_PTT_UNUSED_ENTRY(384),
+	IAVF_PTT_UNUSED_ENTRY(385),
+	IAVF_PTT_UNUSED_ENTRY(386),
+	IAVF_PTT_UNUSED_ENTRY(387),
+	IAVF_PTT_UNUSED_ENTRY(388),
+	IAVF_PTT_UNUSED_ENTRY(389),
+
+	IAVF_PTT_UNUSED_ENTRY(390),
+	IAVF_PTT_UNUSED_ENTRY(391),
+	IAVF_PTT_UNUSED_ENTRY(392),
+	IAVF_PTT_UNUSED_ENTRY(393),
+	IAVF_PTT_UNUSED_ENTRY(394),
+	IAVF_PTT_UNUSED_ENTRY(395),
+	IAVF_PTT_UNUSED_ENTRY(396),
+	IAVF_PTT_UNUSED_ENTRY(397),
+	IAVF_PTT_UNUSED_ENTRY(398),
+	IAVF_PTT_UNUSED_ENTRY(399),
+
+	IAVF_PTT_UNUSED_ENTRY(400),
+	IAVF_PTT_UNUSED_ENTRY(401),
+	IAVF_PTT_UNUSED_ENTRY(402),
+	IAVF_PTT_UNUSED_ENTRY(403),
+	IAVF_PTT_UNUSED_ENTRY(404),
+	IAVF_PTT_UNUSED_ENTRY(405),
+	IAVF_PTT_UNUSED_ENTRY(406),
+	IAVF_PTT_UNUSED_ENTRY(407),
+	IAVF_PTT_UNUSED_ENTRY(408),
+	IAVF_PTT_UNUSED_ENTRY(409),
+
+	IAVF_PTT_UNUSED_ENTRY(410),
+	IAVF_PTT_UNUSED_ENTRY(411),
+	IAVF_PTT_UNUSED_ENTRY(412),
+	IAVF_PTT_UNUSED_ENTRY(413),
+	IAVF_PTT_UNUSED_ENTRY(414),
+	IAVF_PTT_UNUSED_ENTRY(415),
+	IAVF_PTT_UNUSED_ENTRY(416),
+	IAVF_PTT_UNUSED_ENTRY(417),
+	IAVF_PTT_UNUSED_ENTRY(418),
+	IAVF_PTT_UNUSED_ENTRY(419),
+
+	IAVF_PTT_UNUSED_ENTRY(420),
+	IAVF_PTT_UNUSED_ENTRY(421),
+	IAVF_PTT_UNUSED_ENTRY(422),
+	IAVF_PTT_UNUSED_ENTRY(423),
+	IAVF_PTT_UNUSED_ENTRY(424),
+	IAVF_PTT_UNUSED_ENTRY(425),
+	IAVF_PTT_UNUSED_ENTRY(426),
+	IAVF_PTT_UNUSED_ENTRY(427),
+	IAVF_PTT_UNUSED_ENTRY(428),
+	IAVF_PTT_UNUSED_ENTRY(429),
+
+	IAVF_PTT_UNUSED_ENTRY(430),
+	IAVF_PTT_UNUSED_ENTRY(431),
+	IAVF_PTT_UNUSED_ENTRY(432),
+	IAVF_PTT_UNUSED_ENTRY(433),
+	IAVF_PTT_UNUSED_ENTRY(434),
+	IAVF_PTT_UNUSED_ENTRY(435),
+	IAVF_PTT_UNUSED_ENTRY(436),
+	IAVF_PTT_UNUSED_ENTRY(437),
+	IAVF_PTT_UNUSED_ENTRY(438),
+	IAVF_PTT_UNUSED_ENTRY(439),
+
+	IAVF_PTT_UNUSED_ENTRY(440),
+	IAVF_PTT_UNUSED_ENTRY(441),
+	IAVF_PTT_UNUSED_ENTRY(442),
+	IAVF_PTT_UNUSED_ENTRY(443),
+	IAVF_PTT_UNUSED_ENTRY(444),
+	IAVF_PTT_UNUSED_ENTRY(445),
+	IAVF_PTT_UNUSED_ENTRY(446),
+	IAVF_PTT_UNUSED_ENTRY(447),
+	IAVF_PTT_UNUSED_ENTRY(448),
+	IAVF_PTT_UNUSED_ENTRY(449),
+
+	IAVF_PTT_UNUSED_ENTRY(450),
+	IAVF_PTT_UNUSED_ENTRY(451),
+	IAVF_PTT_UNUSED_ENTRY(452),
+	IAVF_PTT_UNUSED_ENTRY(453),
+	IAVF_PTT_UNUSED_ENTRY(454),
+	IAVF_PTT_UNUSED_ENTRY(455),
+	IAVF_PTT_UNUSED_ENTRY(456),
+	IAVF_PTT_UNUSED_ENTRY(457),
+	IAVF_PTT_UNUSED_ENTRY(458),
+	IAVF_PTT_UNUSED_ENTRY(459),
+
+	IAVF_PTT_UNUSED_ENTRY(460),
+	IAVF_PTT_UNUSED_ENTRY(461),
+	IAVF_PTT_UNUSED_ENTRY(462),
+	IAVF_PTT_UNUSED_ENTRY(463),
+	IAVF_PTT_UNUSED_ENTRY(464),
+	IAVF_PTT_UNUSED_ENTRY(465),
+	IAVF_PTT_UNUSED_ENTRY(466),
+	IAVF_PTT_UNUSED_ENTRY(467),
+	IAVF_PTT_UNUSED_ENTRY(468),
+	IAVF_PTT_UNUSED_ENTRY(469),
+
+	IAVF_PTT_UNUSED_ENTRY(470),
+	IAVF_PTT_UNUSED_ENTRY(471),
+	IAVF_PTT_UNUSED_ENTRY(472),
+	IAVF_PTT_UNUSED_ENTRY(473),
+	IAVF_PTT_UNUSED_ENTRY(474),
+	IAVF_PTT_UNUSED_ENTRY(475),
+	IAVF_PTT_UNUSED_ENTRY(476),
+	IAVF_PTT_UNUSED_ENTRY(477),
+	IAVF_PTT_UNUSED_ENTRY(478),
+	IAVF_PTT_UNUSED_ENTRY(479),
+
+	IAVF_PTT_UNUSED_ENTRY(480),
+	IAVF_PTT_UNUSED_ENTRY(481),
+	IAVF_PTT_UNUSED_ENTRY(482),
+	IAVF_PTT_UNUSED_ENTRY(483),
+	IAVF_PTT_UNUSED_ENTRY(484),
+	IAVF_PTT_UNUSED_ENTRY(485),
+	IAVF_PTT_UNUSED_ENTRY(486),
+	IAVF_PTT_UNUSED_ENTRY(487),
+	IAVF_PTT_UNUSED_ENTRY(488),
+	IAVF_PTT_UNUSED_ENTRY(489),
+
+	IAVF_PTT_UNUSED_ENTRY(490),
+	IAVF_PTT_UNUSED_ENTRY(491),
+	IAVF_PTT_UNUSED_ENTRY(492),
+	IAVF_PTT_UNUSED_ENTRY(493),
+	IAVF_PTT_UNUSED_ENTRY(494),
+	IAVF_PTT_UNUSED_ENTRY(495),
+	IAVF_PTT_UNUSED_ENTRY(496),
+	IAVF_PTT_UNUSED_ENTRY(497),
+	IAVF_PTT_UNUSED_ENTRY(498),
+	IAVF_PTT_UNUSED_ENTRY(499),
+
+	IAVF_PTT_UNUSED_ENTRY(500),
+	IAVF_PTT_UNUSED_ENTRY(501),
+	IAVF_PTT_UNUSED_ENTRY(502),
+	IAVF_PTT_UNUSED_ENTRY(503),
+	IAVF_PTT_UNUSED_ENTRY(504),
+	IAVF_PTT_UNUSED_ENTRY(505),
+	IAVF_PTT_UNUSED_ENTRY(506),
+	IAVF_PTT_UNUSED_ENTRY(507),
+	IAVF_PTT_UNUSED_ENTRY(508),
+	IAVF_PTT_UNUSED_ENTRY(509),
+
+	IAVF_PTT_UNUSED_ENTRY(510),
+	IAVF_PTT_UNUSED_ENTRY(511),
+	IAVF_PTT_UNUSED_ENTRY(512),
+	IAVF_PTT_UNUSED_ENTRY(513),
+	IAVF_PTT_UNUSED_ENTRY(514),
+	IAVF_PTT_UNUSED_ENTRY(515),
+	IAVF_PTT_UNUSED_ENTRY(516),
+	IAVF_PTT_UNUSED_ENTRY(517),
+	IAVF_PTT_UNUSED_ENTRY(518),
+	IAVF_PTT_UNUSED_ENTRY(519),
+
+	IAVF_PTT_UNUSED_ENTRY(520),
+	IAVF_PTT_UNUSED_ENTRY(521),
+	IAVF_PTT_UNUSED_ENTRY(522),
+	IAVF_PTT_UNUSED_ENTRY(523),
+	IAVF_PTT_UNUSED_ENTRY(524),
+	IAVF_PTT_UNUSED_ENTRY(525),
+	IAVF_PTT_UNUSED_ENTRY(526),
+	IAVF_PTT_UNUSED_ENTRY(527),
+	IAVF_PTT_UNUSED_ENTRY(528),
+	IAVF_PTT_UNUSED_ENTRY(529),
+
+	IAVF_PTT_UNUSED_ENTRY(530),
+	IAVF_PTT_UNUSED_ENTRY(531),
+	IAVF_PTT_UNUSED_ENTRY(532),
+	IAVF_PTT_UNUSED_ENTRY(533),
+	IAVF_PTT_UNUSED_ENTRY(534),
+	IAVF_PTT_UNUSED_ENTRY(535),
+	IAVF_PTT_UNUSED_ENTRY(536),
+	IAVF_PTT_UNUSED_ENTRY(537),
+	IAVF_PTT_UNUSED_ENTRY(538),
+	IAVF_PTT_UNUSED_ENTRY(539),
+
+	IAVF_PTT_UNUSED_ENTRY(540),
+	IAVF_PTT_UNUSED_ENTRY(541),
+	IAVF_PTT_UNUSED_ENTRY(542),
+	IAVF_PTT_UNUSED_ENTRY(543),
+	IAVF_PTT_UNUSED_ENTRY(544),
+	IAVF_PTT_UNUSED_ENTRY(545),
+	IAVF_PTT_UNUSED_ENTRY(546),
+	IAVF_PTT_UNUSED_ENTRY(547),
+	IAVF_PTT_UNUSED_ENTRY(548),
+	IAVF_PTT_UNUSED_ENTRY(549),
+
+	IAVF_PTT_UNUSED_ENTRY(550),
+	IAVF_PTT_UNUSED_ENTRY(551),
+	IAVF_PTT_UNUSED_ENTRY(552),
+	IAVF_PTT_UNUSED_ENTRY(553),
+	IAVF_PTT_UNUSED_ENTRY(554),
+	IAVF_PTT_UNUSED_ENTRY(555),
+	IAVF_PTT_UNUSED_ENTRY(556),
+	IAVF_PTT_UNUSED_ENTRY(557),
+	IAVF_PTT_UNUSED_ENTRY(558),
+	IAVF_PTT_UNUSED_ENTRY(559),
+
+	IAVF_PTT_UNUSED_ENTRY(560),
+	IAVF_PTT_UNUSED_ENTRY(561),
+	IAVF_PTT_UNUSED_ENTRY(562),
+	IAVF_PTT_UNUSED_ENTRY(563),
+	IAVF_PTT_UNUSED_ENTRY(564),
+	IAVF_PTT_UNUSED_ENTRY(565),
+	IAVF_PTT_UNUSED_ENTRY(566),
+	IAVF_PTT_UNUSED_ENTRY(567),
+	IAVF_PTT_UNUSED_ENTRY(568),
+	IAVF_PTT_UNUSED_ENTRY(569),
+
+	IAVF_PTT_UNUSED_ENTRY(570),
+	IAVF_PTT_UNUSED_ENTRY(571),
+	IAVF_PTT_UNUSED_ENTRY(572),
+	IAVF_PTT_UNUSED_ENTRY(573),
+	IAVF_PTT_UNUSED_ENTRY(574),
+	IAVF_PTT_UNUSED_ENTRY(575),
+	IAVF_PTT_UNUSED_ENTRY(576),
+	IAVF_PTT_UNUSED_ENTRY(577),
+	IAVF_PTT_UNUSED_ENTRY(578),
+	IAVF_PTT_UNUSED_ENTRY(579),
+
+	IAVF_PTT_UNUSED_ENTRY(580),
+	IAVF_PTT_UNUSED_ENTRY(581),
+	IAVF_PTT_UNUSED_ENTRY(582),
+	IAVF_PTT_UNUSED_ENTRY(583),
+	IAVF_PTT_UNUSED_ENTRY(584),
+	IAVF_PTT_UNUSED_ENTRY(585),
+	IAVF_PTT_UNUSED_ENTRY(586),
+	IAVF_PTT_UNUSED_ENTRY(587),
+	IAVF_PTT_UNUSED_ENTRY(588),
+	IAVF_PTT_UNUSED_ENTRY(589),
+
+	IAVF_PTT_UNUSED_ENTRY(590),
+	IAVF_PTT_UNUSED_ENTRY(591),
+	IAVF_PTT_UNUSED_ENTRY(592),
+	IAVF_PTT_UNUSED_ENTRY(593),
+	IAVF_PTT_UNUSED_ENTRY(594),
+	IAVF_PTT_UNUSED_ENTRY(595),
+	IAVF_PTT_UNUSED_ENTRY(596),
+	IAVF_PTT_UNUSED_ENTRY(597),
+	IAVF_PTT_UNUSED_ENTRY(598),
+	IAVF_PTT_UNUSED_ENTRY(599),
+
+	IAVF_PTT_UNUSED_ENTRY(600),
+	IAVF_PTT_UNUSED_ENTRY(601),
+	IAVF_PTT_UNUSED_ENTRY(602),
+	IAVF_PTT_UNUSED_ENTRY(603),
+	IAVF_PTT_UNUSED_ENTRY(604),
+	IAVF_PTT_UNUSED_ENTRY(605),
+	IAVF_PTT_UNUSED_ENTRY(606),
+	IAVF_PTT_UNUSED_ENTRY(607),
+	IAVF_PTT_UNUSED_ENTRY(608),
+	IAVF_PTT_UNUSED_ENTRY(609),
+
+	IAVF_PTT_UNUSED_ENTRY(610),
+	IAVF_PTT_UNUSED_ENTRY(611),
+	IAVF_PTT_UNUSED_ENTRY(612),
+	IAVF_PTT_UNUSED_ENTRY(613),
+	IAVF_PTT_UNUSED_ENTRY(614),
+	IAVF_PTT_UNUSED_ENTRY(615),
+	IAVF_PTT_UNUSED_ENTRY(616),
+	IAVF_PTT_UNUSED_ENTRY(617),
+	IAVF_PTT_UNUSED_ENTRY(618),
+	IAVF_PTT_UNUSED_ENTRY(619),
+
+	IAVF_PTT_UNUSED_ENTRY(620),
+	IAVF_PTT_UNUSED_ENTRY(621),
+	IAVF_PTT_UNUSED_ENTRY(622),
+	IAVF_PTT_UNUSED_ENTRY(623),
+	IAVF_PTT_UNUSED_ENTRY(624),
+	IAVF_PTT_UNUSED_ENTRY(625),
+	IAVF_PTT_UNUSED_ENTRY(626),
+	IAVF_PTT_UNUSED_ENTRY(627),
+	IAVF_PTT_UNUSED_ENTRY(628),
+	IAVF_PTT_UNUSED_ENTRY(629),
+
+	IAVF_PTT_UNUSED_ENTRY(630),
+	IAVF_PTT_UNUSED_ENTRY(631),
+	IAVF_PTT_UNUSED_ENTRY(632),
+	IAVF_PTT_UNUSED_ENTRY(633),
+	IAVF_PTT_UNUSED_ENTRY(634),
+	IAVF_PTT_UNUSED_ENTRY(635),
+	IAVF_PTT_UNUSED_ENTRY(636),
+	IAVF_PTT_UNUSED_ENTRY(637),
+	IAVF_PTT_UNUSED_ENTRY(638),
+	IAVF_PTT_UNUSED_ENTRY(639),
+
+	IAVF_PTT_UNUSED_ENTRY(640),
+	IAVF_PTT_UNUSED_ENTRY(641),
+	IAVF_PTT_UNUSED_ENTRY(642),
+	IAVF_PTT_UNUSED_ENTRY(643),
+	IAVF_PTT_UNUSED_ENTRY(644),
+	IAVF_PTT_UNUSED_ENTRY(645),
+	IAVF_PTT_UNUSED_ENTRY(646),
+	IAVF_PTT_UNUSED_ENTRY(647),
+	IAVF_PTT_UNUSED_ENTRY(648),
+	IAVF_PTT_UNUSED_ENTRY(649),
+
+	IAVF_PTT_UNUSED_ENTRY(650),
+	IAVF_PTT_UNUSED_ENTRY(651),
+	IAVF_PTT_UNUSED_ENTRY(652),
+	IAVF_PTT_UNUSED_ENTRY(653),
+	IAVF_PTT_UNUSED_ENTRY(654),
+	IAVF_PTT_UNUSED_ENTRY(655),
+	IAVF_PTT_UNUSED_ENTRY(656),
+	IAVF_PTT_UNUSED_ENTRY(657),
+	IAVF_PTT_UNUSED_ENTRY(658),
+	IAVF_PTT_UNUSED_ENTRY(659),
+
+	IAVF_PTT_UNUSED_ENTRY(660),
+	IAVF_PTT_UNUSED_ENTRY(661),
+	IAVF_PTT_UNUSED_ENTRY(662),
+	IAVF_PTT_UNUSED_ENTRY(663),
+	IAVF_PTT_UNUSED_ENTRY(664),
+	IAVF_PTT_UNUSED_ENTRY(665),
+	IAVF_PTT_UNUSED_ENTRY(666),
+	IAVF_PTT_UNUSED_ENTRY(667),
+	IAVF_PTT_UNUSED_ENTRY(668),
+	IAVF_PTT_UNUSED_ENTRY(669),
+
+	IAVF_PTT_UNUSED_ENTRY(670),
+	IAVF_PTT_UNUSED_ENTRY(671),
+	IAVF_PTT_UNUSED_ENTRY(672),
+	IAVF_PTT_UNUSED_ENTRY(673),
+	IAVF_PTT_UNUSED_ENTRY(674),
+	IAVF_PTT_UNUSED_ENTRY(675),
+	IAVF_PTT_UNUSED_ENTRY(676),
+	IAVF_PTT_UNUSED_ENTRY(677),
+	IAVF_PTT_UNUSED_ENTRY(678),
+	IAVF_PTT_UNUSED_ENTRY(679),
+
+	IAVF_PTT_UNUSED_ENTRY(680),
+	IAVF_PTT_UNUSED_ENTRY(681),
+	IAVF_PTT_UNUSED_ENTRY(682),
+	IAVF_PTT_UNUSED_ENTRY(683),
+	IAVF_PTT_UNUSED_ENTRY(684),
+	IAVF_PTT_UNUSED_ENTRY(685),
+	IAVF_PTT_UNUSED_ENTRY(686),
+	IAVF_PTT_UNUSED_ENTRY(687),
+	IAVF_PTT_UNUSED_ENTRY(688),
+	IAVF_PTT_UNUSED_ENTRY(689),
+
+	IAVF_PTT_UNUSED_ENTRY(690),
+	IAVF_PTT_UNUSED_ENTRY(691),
+	IAVF_PTT_UNUSED_ENTRY(692),
+	IAVF_PTT_UNUSED_ENTRY(693),
+	IAVF_PTT_UNUSED_ENTRY(694),
+	IAVF_PTT_UNUSED_ENTRY(695),
+	IAVF_PTT_UNUSED_ENTRY(696),
+	IAVF_PTT_UNUSED_ENTRY(697),
+	IAVF_PTT_UNUSED_ENTRY(698),
+	IAVF_PTT_UNUSED_ENTRY(699),
+
+	IAVF_PTT_UNUSED_ENTRY(700),
+	IAVF_PTT_UNUSED_ENTRY(701),
+	IAVF_PTT_UNUSED_ENTRY(702),
+	IAVF_PTT_UNUSED_ENTRY(703),
+	IAVF_PTT_UNUSED_ENTRY(704),
+	IAVF_PTT_UNUSED_ENTRY(705),
+	IAVF_PTT_UNUSED_ENTRY(706),
+	IAVF_PTT_UNUSED_ENTRY(707),
+	IAVF_PTT_UNUSED_ENTRY(708),
+	IAVF_PTT_UNUSED_ENTRY(709),
+
+	IAVF_PTT_UNUSED_ENTRY(710),
+	IAVF_PTT_UNUSED_ENTRY(711),
+	IAVF_PTT_UNUSED_ENTRY(712),
+	IAVF_PTT_UNUSED_ENTRY(713),
+	IAVF_PTT_UNUSED_ENTRY(714),
+	IAVF_PTT_UNUSED_ENTRY(715),
+	IAVF_PTT_UNUSED_ENTRY(716),
+	IAVF_PTT_UNUSED_ENTRY(717),
+	IAVF_PTT_UNUSED_ENTRY(718),
+	IAVF_PTT_UNUSED_ENTRY(719),
+
+	IAVF_PTT_UNUSED_ENTRY(720),
+	IAVF_PTT_UNUSED_ENTRY(721),
+	IAVF_PTT_UNUSED_ENTRY(722),
+	IAVF_PTT_UNUSED_ENTRY(723),
+	IAVF_PTT_UNUSED_ENTRY(724),
+	IAVF_PTT_UNUSED_ENTRY(725),
+	IAVF_PTT_UNUSED_ENTRY(726),
+	IAVF_PTT_UNUSED_ENTRY(727),
+	IAVF_PTT_UNUSED_ENTRY(728),
+	IAVF_PTT_UNUSED_ENTRY(729),
+
+	IAVF_PTT_UNUSED_ENTRY(730),
+	IAVF_PTT_UNUSED_ENTRY(731),
+	IAVF_PTT_UNUSED_ENTRY(732),
+	IAVF_PTT_UNUSED_ENTRY(733),
+	IAVF_PTT_UNUSED_ENTRY(734),
+	IAVF_PTT_UNUSED_ENTRY(735),
+	IAVF_PTT_UNUSED_ENTRY(736),
+	IAVF_PTT_UNUSED_ENTRY(737),
+	IAVF_PTT_UNUSED_ENTRY(738),
+	IAVF_PTT_UNUSED_ENTRY(739),
+
+	IAVF_PTT_UNUSED_ENTRY(740),
+	IAVF_PTT_UNUSED_ENTRY(741),
+	IAVF_PTT_UNUSED_ENTRY(742),
+	IAVF_PTT_UNUSED_ENTRY(743),
+	IAVF_PTT_UNUSED_ENTRY(744),
+	IAVF_PTT_UNUSED_ENTRY(745),
+	IAVF_PTT_UNUSED_ENTRY(746),
+	IAVF_PTT_UNUSED_ENTRY(747),
+	IAVF_PTT_UNUSED_ENTRY(748),
+	IAVF_PTT_UNUSED_ENTRY(749),
+
+	IAVF_PTT_UNUSED_ENTRY(750),
+	IAVF_PTT_UNUSED_ENTRY(751),
+	IAVF_PTT_UNUSED_ENTRY(752),
+	IAVF_PTT_UNUSED_ENTRY(753),
+	IAVF_PTT_UNUSED_ENTRY(754),
+	IAVF_PTT_UNUSED_ENTRY(755),
+	IAVF_PTT_UNUSED_ENTRY(756),
+	IAVF_PTT_UNUSED_ENTRY(757),
+	IAVF_PTT_UNUSED_ENTRY(758),
+	IAVF_PTT_UNUSED_ENTRY(759),
+
+	IAVF_PTT_UNUSED_ENTRY(760),
+	IAVF_PTT_UNUSED_ENTRY(761),
+	IAVF_PTT_UNUSED_ENTRY(762),
+	IAVF_PTT_UNUSED_ENTRY(763),
+	IAVF_PTT_UNUSED_ENTRY(764),
+	IAVF_PTT_UNUSED_ENTRY(765),
+	IAVF_PTT_UNUSED_ENTRY(766),
+	IAVF_PTT_UNUSED_ENTRY(767),
+	IAVF_PTT_UNUSED_ENTRY(768),
+	IAVF_PTT_UNUSED_ENTRY(769),
+
+	IAVF_PTT_UNUSED_ENTRY(770),
+	IAVF_PTT_UNUSED_ENTRY(771),
+	IAVF_PTT_UNUSED_ENTRY(772),
+	IAVF_PTT_UNUSED_ENTRY(773),
+	IAVF_PTT_UNUSED_ENTRY(774),
+	IAVF_PTT_UNUSED_ENTRY(775),
+	IAVF_PTT_UNUSED_ENTRY(776),
+	IAVF_PTT_UNUSED_ENTRY(777),
+	IAVF_PTT_UNUSED_ENTRY(778),
+	IAVF_PTT_UNUSED_ENTRY(779),
+
+	IAVF_PTT_UNUSED_ENTRY(780),
+	IAVF_PTT_UNUSED_ENTRY(781),
+	IAVF_PTT_UNUSED_ENTRY(782),
+	IAVF_PTT_UNUSED_ENTRY(783),
+	IAVF_PTT_UNUSED_ENTRY(784),
+	IAVF_PTT_UNUSED_ENTRY(785),
+	IAVF_PTT_UNUSED_ENTRY(786),
+	IAVF_PTT_UNUSED_ENTRY(787),
+	IAVF_PTT_UNUSED_ENTRY(788),
+	IAVF_PTT_UNUSED_ENTRY(789),
+
+	IAVF_PTT_UNUSED_ENTRY(790),
+	IAVF_PTT_UNUSED_ENTRY(791),
+	IAVF_PTT_UNUSED_ENTRY(792),
+	IAVF_PTT_UNUSED_ENTRY(793),
+	IAVF_PTT_UNUSED_ENTRY(794),
+	IAVF_PTT_UNUSED_ENTRY(795),
+	IAVF_PTT_UNUSED_ENTRY(796),
+	IAVF_PTT_UNUSED_ENTRY(797),
+	IAVF_PTT_UNUSED_ENTRY(798),
+	IAVF_PTT_UNUSED_ENTRY(799),
+
+	IAVF_PTT_UNUSED_ENTRY(800),
+	IAVF_PTT_UNUSED_ENTRY(801),
+	IAVF_PTT_UNUSED_ENTRY(802),
+	IAVF_PTT_UNUSED_ENTRY(803),
+	IAVF_PTT_UNUSED_ENTRY(804),
+	IAVF_PTT_UNUSED_ENTRY(805),
+	IAVF_PTT_UNUSED_ENTRY(806),
+	IAVF_PTT_UNUSED_ENTRY(807),
+	IAVF_PTT_UNUSED_ENTRY(808),
+	IAVF_PTT_UNUSED_ENTRY(809),
+
+	IAVF_PTT_UNUSED_ENTRY(810),
+	IAVF_PTT_UNUSED_ENTRY(811),
+	IAVF_PTT_UNUSED_ENTRY(812),
+	IAVF_PTT_UNUSED_ENTRY(813),
+	IAVF_PTT_UNUSED_ENTRY(814),
+	IAVF_PTT_UNUSED_ENTRY(815),
+	IAVF_PTT_UNUSED_ENTRY(816),
+	IAVF_PTT_UNUSED_ENTRY(817),
+	IAVF_PTT_UNUSED_ENTRY(818),
+	IAVF_PTT_UNUSED_ENTRY(819),
+
+	IAVF_PTT_UNUSED_ENTRY(820),
+	IAVF_PTT_UNUSED_ENTRY(821),
+	IAVF_PTT_UNUSED_ENTRY(822),
+	IAVF_PTT_UNUSED_ENTRY(823),
+	IAVF_PTT_UNUSED_ENTRY(824),
+	IAVF_PTT_UNUSED_ENTRY(825),
+	IAVF_PTT_UNUSED_ENTRY(826),
+	IAVF_PTT_UNUSED_ENTRY(827),
+	IAVF_PTT_UNUSED_ENTRY(828),
+	IAVF_PTT_UNUSED_ENTRY(829),
+
+	IAVF_PTT_UNUSED_ENTRY(830),
+	IAVF_PTT_UNUSED_ENTRY(831),
+	IAVF_PTT_UNUSED_ENTRY(832),
+	IAVF_PTT_UNUSED_ENTRY(833),
+	IAVF_PTT_UNUSED_ENTRY(834),
+	IAVF_PTT_UNUSED_ENTRY(835),
+	IAVF_PTT_UNUSED_ENTRY(836),
+	IAVF_PTT_UNUSED_ENTRY(837),
+	IAVF_PTT_UNUSED_ENTRY(838),
+	IAVF_PTT_UNUSED_ENTRY(839),
+
+	IAVF_PTT_UNUSED_ENTRY(840),
+	IAVF_PTT_UNUSED_ENTRY(841),
+	IAVF_PTT_UNUSED_ENTRY(842),
+	IAVF_PTT_UNUSED_ENTRY(843),
+	IAVF_PTT_UNUSED_ENTRY(844),
+	IAVF_PTT_UNUSED_ENTRY(845),
+	IAVF_PTT_UNUSED_ENTRY(846),
+	IAVF_PTT_UNUSED_ENTRY(847),
+	IAVF_PTT_UNUSED_ENTRY(848),
+	IAVF_PTT_UNUSED_ENTRY(849),
+
+	IAVF_PTT_UNUSED_ENTRY(850),
+	IAVF_PTT_UNUSED_ENTRY(851),
+	IAVF_PTT_UNUSED_ENTRY(852),
+	IAVF_PTT_UNUSED_ENTRY(853),
+	IAVF_PTT_UNUSED_ENTRY(854),
+	IAVF_PTT_UNUSED_ENTRY(855),
+	IAVF_PTT_UNUSED_ENTRY(856),
+	IAVF_PTT_UNUSED_ENTRY(857),
+	IAVF_PTT_UNUSED_ENTRY(858),
+	IAVF_PTT_UNUSED_ENTRY(859),
+
+	IAVF_PTT_UNUSED_ENTRY(860),
+	IAVF_PTT_UNUSED_ENTRY(861),
+	IAVF_PTT_UNUSED_ENTRY(862),
+	IAVF_PTT_UNUSED_ENTRY(863),
+	IAVF_PTT_UNUSED_ENTRY(864),
+	IAVF_PTT_UNUSED_ENTRY(865),
+	IAVF_PTT_UNUSED_ENTRY(866),
+	IAVF_PTT_UNUSED_ENTRY(867),
+	IAVF_PTT_UNUSED_ENTRY(868),
+	IAVF_PTT_UNUSED_ENTRY(869),
+
+	IAVF_PTT_UNUSED_ENTRY(870),
+	IAVF_PTT_UNUSED_ENTRY(871),
+	IAVF_PTT_UNUSED_ENTRY(872),
+	IAVF_PTT_UNUSED_ENTRY(873),
+	IAVF_PTT_UNUSED_ENTRY(874),
+	IAVF_PTT_UNUSED_ENTRY(875),
+	IAVF_PTT_UNUSED_ENTRY(876),
+	IAVF_PTT_UNUSED_ENTRY(877),
+	IAVF_PTT_UNUSED_ENTRY(878),
+	IAVF_PTT_UNUSED_ENTRY(879),
+
+	IAVF_PTT_UNUSED_ENTRY(880),
+	IAVF_PTT_UNUSED_ENTRY(881),
+	IAVF_PTT_UNUSED_ENTRY(882),
+	IAVF_PTT_UNUSED_ENTRY(883),
+	IAVF_PTT_UNUSED_ENTRY(884),
+	IAVF_PTT_UNUSED_ENTRY(885),
+	IAVF_PTT_UNUSED_ENTRY(886),
+	IAVF_PTT_UNUSED_ENTRY(887),
+	IAVF_PTT_UNUSED_ENTRY(888),
+	IAVF_PTT_UNUSED_ENTRY(889),
+
+	IAVF_PTT_UNUSED_ENTRY(890),
+	IAVF_PTT_UNUSED_ENTRY(891),
+	IAVF_PTT_UNUSED_ENTRY(892),
+	IAVF_PTT_UNUSED_ENTRY(893),
+	IAVF_PTT_UNUSED_ENTRY(894),
+	IAVF_PTT_UNUSED_ENTRY(895),
+	IAVF_PTT_UNUSED_ENTRY(896),
+	IAVF_PTT_UNUSED_ENTRY(897),
+	IAVF_PTT_UNUSED_ENTRY(898),
+	IAVF_PTT_UNUSED_ENTRY(899),
+
+	IAVF_PTT_UNUSED_ENTRY(900),
+	IAVF_PTT_UNUSED_ENTRY(901),
+	IAVF_PTT_UNUSED_ENTRY(902),
+	IAVF_PTT_UNUSED_ENTRY(903),
+	IAVF_PTT_UNUSED_ENTRY(904),
+	IAVF_PTT_UNUSED_ENTRY(905),
+	IAVF_PTT_UNUSED_ENTRY(906),
+	IAVF_PTT_UNUSED_ENTRY(907),
+	IAVF_PTT_UNUSED_ENTRY(908),
+	IAVF_PTT_UNUSED_ENTRY(909),
+
+	IAVF_PTT_UNUSED_ENTRY(910),
+	IAVF_PTT_UNUSED_ENTRY(911),
+	IAVF_PTT_UNUSED_ENTRY(912),
+	IAVF_PTT_UNUSED_ENTRY(913),
+	IAVF_PTT_UNUSED_ENTRY(914),
+	IAVF_PTT_UNUSED_ENTRY(915),
+	IAVF_PTT_UNUSED_ENTRY(916),
+	IAVF_PTT_UNUSED_ENTRY(917),
+	IAVF_PTT_UNUSED_ENTRY(918),
+	IAVF_PTT_UNUSED_ENTRY(919),
+
+	IAVF_PTT_UNUSED_ENTRY(920),
+	IAVF_PTT_UNUSED_ENTRY(921),
+	IAVF_PTT_UNUSED_ENTRY(922),
+	IAVF_PTT_UNUSED_ENTRY(923),
+	IAVF_PTT_UNUSED_ENTRY(924),
+	IAVF_PTT_UNUSED_ENTRY(925),
+	IAVF_PTT_UNUSED_ENTRY(926),
+	IAVF_PTT_UNUSED_ENTRY(927),
+	IAVF_PTT_UNUSED_ENTRY(928),
+	IAVF_PTT_UNUSED_ENTRY(929),
+
+	IAVF_PTT_UNUSED_ENTRY(930),
+	IAVF_PTT_UNUSED_ENTRY(931),
+	IAVF_PTT_UNUSED_ENTRY(932),
+	IAVF_PTT_UNUSED_ENTRY(933),
+	IAVF_PTT_UNUSED_ENTRY(934),
+	IAVF_PTT_UNUSED_ENTRY(935),
+	IAVF_PTT_UNUSED_ENTRY(936),
+	IAVF_PTT_UNUSED_ENTRY(937),
+	IAVF_PTT_UNUSED_ENTRY(938),
+	IAVF_PTT_UNUSED_ENTRY(939),
+
+	IAVF_PTT_UNUSED_ENTRY(940),
+	IAVF_PTT_UNUSED_ENTRY(941),
+	IAVF_PTT_UNUSED_ENTRY(942),
+	IAVF_PTT_UNUSED_ENTRY(943),
+	IAVF_PTT_UNUSED_ENTRY(944),
+	IAVF_PTT_UNUSED_ENTRY(945),
+	IAVF_PTT_UNUSED_ENTRY(946),
+	IAVF_PTT_UNUSED_ENTRY(947),
+	IAVF_PTT_UNUSED_ENTRY(948),
+	IAVF_PTT_UNUSED_ENTRY(949),
+
+	IAVF_PTT_UNUSED_ENTRY(950),
+	IAVF_PTT_UNUSED_ENTRY(951),
+	IAVF_PTT_UNUSED_ENTRY(952),
+	IAVF_PTT_UNUSED_ENTRY(953),
+	IAVF_PTT_UNUSED_ENTRY(954),
+	IAVF_PTT_UNUSED_ENTRY(955),
+	IAVF_PTT_UNUSED_ENTRY(956),
+	IAVF_PTT_UNUSED_ENTRY(957),
+	IAVF_PTT_UNUSED_ENTRY(958),
+	IAVF_PTT_UNUSED_ENTRY(959),
+
+	IAVF_PTT_UNUSED_ENTRY(960),
+	IAVF_PTT_UNUSED_ENTRY(961),
+	IAVF_PTT_UNUSED_ENTRY(962),
+	IAVF_PTT_UNUSED_ENTRY(963),
+	IAVF_PTT_UNUSED_ENTRY(964),
+	IAVF_PTT_UNUSED_ENTRY(965),
+	IAVF_PTT_UNUSED_ENTRY(966),
+	IAVF_PTT_UNUSED_ENTRY(967),
+	IAVF_PTT_UNUSED_ENTRY(968),
+	IAVF_PTT_UNUSED_ENTRY(969),
+
+	IAVF_PTT_UNUSED_ENTRY(970),
+	IAVF_PTT_UNUSED_ENTRY(971),
+	IAVF_PTT_UNUSED_ENTRY(972),
+	IAVF_PTT_UNUSED_ENTRY(973),
+	IAVF_PTT_UNUSED_ENTRY(974),
+	IAVF_PTT_UNUSED_ENTRY(975),
+	IAVF_PTT_UNUSED_ENTRY(976),
+	IAVF_PTT_UNUSED_ENTRY(977),
+	IAVF_PTT_UNUSED_ENTRY(978),
+	IAVF_PTT_UNUSED_ENTRY(979),
+
+	IAVF_PTT_UNUSED_ENTRY(980),
+	IAVF_PTT_UNUSED_ENTRY(981),
+	IAVF_PTT_UNUSED_ENTRY(982),
+	IAVF_PTT_UNUSED_ENTRY(983),
+	IAVF_PTT_UNUSED_ENTRY(984),
+	IAVF_PTT_UNUSED_ENTRY(985),
+	IAVF_PTT_UNUSED_ENTRY(986),
+	IAVF_PTT_UNUSED_ENTRY(987),
+	IAVF_PTT_UNUSED_ENTRY(988),
+	IAVF_PTT_UNUSED_ENTRY(989),
+
+	IAVF_PTT_UNUSED_ENTRY(990),
+	IAVF_PTT_UNUSED_ENTRY(991),
+	IAVF_PTT_UNUSED_ENTRY(992),
+	IAVF_PTT_UNUSED_ENTRY(993),
+	IAVF_PTT_UNUSED_ENTRY(994),
+	IAVF_PTT_UNUSED_ENTRY(995),
+	IAVF_PTT_UNUSED_ENTRY(996),
+	IAVF_PTT_UNUSED_ENTRY(997),
+	IAVF_PTT_UNUSED_ENTRY(998),
+	IAVF_PTT_UNUSED_ENTRY(999),
+
+	IAVF_PTT_UNUSED_ENTRY(1000),
+	IAVF_PTT_UNUSED_ENTRY(1001),
+	IAVF_PTT_UNUSED_ENTRY(1002),
+	IAVF_PTT_UNUSED_ENTRY(1003),
+	IAVF_PTT_UNUSED_ENTRY(1004),
+	IAVF_PTT_UNUSED_ENTRY(1005),
+	IAVF_PTT_UNUSED_ENTRY(1006),
+	IAVF_PTT_UNUSED_ENTRY(1007),
+	IAVF_PTT_UNUSED_ENTRY(1008),
+	IAVF_PTT_UNUSED_ENTRY(1009),
+
+	IAVF_PTT_UNUSED_ENTRY(1010),
+	IAVF_PTT_UNUSED_ENTRY(1011),
+	IAVF_PTT_UNUSED_ENTRY(1012),
+	IAVF_PTT_UNUSED_ENTRY(1013),
+	IAVF_PTT_UNUSED_ENTRY(1014),
+	IAVF_PTT_UNUSED_ENTRY(1015),
+	IAVF_PTT_UNUSED_ENTRY(1016),
+	IAVF_PTT_UNUSED_ENTRY(1017),
+	IAVF_PTT_UNUSED_ENTRY(1018),
+	IAVF_PTT_UNUSED_ENTRY(1019),
+
+	IAVF_PTT_UNUSED_ENTRY(1020),
+	IAVF_PTT_UNUSED_ENTRY(1021),
+	IAVF_PTT_UNUSED_ENTRY(1022),
+	IAVF_PTT_UNUSED_ENTRY(1023),
 };
 
 /**
@@ -924,7 +1769,7 @@ enum iavf_status iavf_validate_mac_addr(u8 *mac_addr)
  **/
 enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
 				enum virtchnl_ops v_opcode,
-				enum iavf_status v_retval,
+				enum virtchnl_status_code v_retval,
 				u8 *msg, u16 msglen,
 				struct iavf_asq_cmd_details *cmd_details)
 {
@@ -1001,7 +1846,7 @@ void iavf_vf_parse_hw_config(struct iavf_hw *hw,
 enum iavf_status iavf_vf_reset(struct iavf_hw *hw)
 {
 	return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
-				      IAVF_SUCCESS, NULL, 0, NULL);
+				      VIRTCHNL_STATUS_SUCCESS, NULL, 0, NULL);
 }
 
 /**
diff --git a/drivers/common/iavf/iavf_devids.h b/drivers/common/iavf/iavf_devids.h
index 2e63aac289..3e09e5feb8 100644
--- a/drivers/common/iavf/iavf_devids.h
+++ b/drivers/common/iavf/iavf_devids.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_DEVIDS_H_
@@ -13,5 +13,6 @@
 #define IAVF_DEV_ID_VF_HV		0x1571
 #define IAVF_DEV_ID_ADAPTIVE_VF		0x1889
 #define IAVF_DEV_ID_X722_VF		0x37CD
+#define IAVF_DEV_ID_X722_A0_VF          0x374D
 
 #endif /* _IAVF_DEVIDS_H_ */
diff --git a/drivers/common/iavf/iavf_impl.c b/drivers/common/iavf/iavf_impl.c
index f80878b9fd..4284585f5d 100644
--- a/drivers/common/iavf/iavf_impl.c
+++ b/drivers/common/iavf/iavf_impl.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2020 Intel Corporation
+ * Copyright(c) 2019-2021 Intel Corporation
  */
 
 #include <stdio.h>
diff --git a/drivers/common/iavf/iavf_osdep.h b/drivers/common/iavf/iavf_osdep.h
index 7cba13ff74..0ea7ea0efe 100644
--- a/drivers/common/iavf/iavf_osdep.h
+++ b/drivers/common/iavf/iavf_osdep.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2020 Intel Corporation
+ * Copyright(c) 2017-2021 Intel Corporation
  */
 
 #ifndef _IAVF_OSDEP_H_
@@ -123,6 +123,11 @@ writeq(uint64_t value, volatile void *addr)
 #define IAVF_PCI_REG_WRITE(reg, value)         writel(value, reg)
 #define IAVF_PCI_REG_WRITE_RELAXED(reg, value) writel_relaxed(value, reg)
 
+#define IAVF_PCI_REG_WC_WRITE(reg, value) \
+	rte_write32_wc((rte_cpu_to_le_32(value)), reg)
+#define IAVF_PCI_REG_WC_WRITE_RELAXED(reg, value) \
+	rte_write32_wc_relaxed((rte_cpu_to_le_32(value)), reg)
+
 #define IAVF_READ_REG(hw, reg)                 rd32(hw, reg)
 #define IAVF_WRITE_REG(hw, reg, value)         wr32(hw, reg, value)
 
diff --git a/drivers/common/iavf/iavf_prototype.h b/drivers/common/iavf/iavf_prototype.h
index f34e77db0f..16cb973bb8 100644
--- a/drivers/common/iavf/iavf_prototype.h
+++ b/drivers/common/iavf/iavf_prototype.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_PROTOTYPE_H_
@@ -69,7 +69,7 @@ enum iavf_status iavf_set_mac_type(struct iavf_hw *hw);
 
 extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
 
-STATIC INLINE struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+STATIC INLINE struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u16 ptype)
 {
 	return iavf_ptype_lookup[ptype];
 }
@@ -87,7 +87,7 @@ enum iavf_status iavf_vf_reset(struct iavf_hw *hw);
 __rte_internal
 enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
 				enum virtchnl_ops v_opcode,
-				enum iavf_status v_retval,
+				enum virtchnl_status_code v_retval,
 				u8 *msg, u16 msglen,
 				struct iavf_asq_cmd_details *cmd_details);
 enum iavf_status iavf_aq_debug_dump(struct iavf_hw *hw, u8 cluster_id,
diff --git a/drivers/common/iavf/iavf_register.h b/drivers/common/iavf/iavf_register.h
index 03d62a9da7..328100138d 100644
--- a/drivers/common/iavf/iavf_register.h
+++ b/drivers/common/iavf/iavf_register.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_REGISTER_H_
diff --git a/drivers/common/iavf/iavf_status.h b/drivers/common/iavf/iavf_status.h
index f425638063..4dd0f5c5d8 100644
--- a/drivers/common/iavf/iavf_status.h
+++ b/drivers/common/iavf/iavf_status.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_STATUS_H_
@@ -19,7 +19,7 @@ enum iavf_status {
 	IAVF_ERR_ADAPTER_STOPPED		= -9,
 	IAVF_ERR_INVALID_MAC_ADDR		= -10,
 	IAVF_ERR_DEVICE_NOT_SUPPORTED		= -11,
-	IAVF_ERR_MASTER_REQUESTS_PENDING	= -12,
+	IAVF_ERR_PRIMARY_REQUESTS_PENDING	= -12,
 	IAVF_ERR_INVALID_LINK_SETTINGS		= -13,
 	IAVF_ERR_AUTONEG_NOT_COMPLETE		= -14,
 	IAVF_ERR_RESET_FAILED			= -15,
diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 0990c9aa33..49d262d795 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _IAVF_TYPE_H_
@@ -141,6 +141,8 @@ enum iavf_debug_mask {
 #define IAVF_PHY_LED_MODE_MASK			0xFFFF
 #define IAVF_PHY_LED_MODE_ORIG			0x80000000
 
+#define IAVF_MAX_TRAFFIC_CLASS	8
+
 /* Memory types */
 enum iavf_memset_type {
 	IAVF_NONDMA_MEM = 0,
@@ -395,6 +397,45 @@ union iavf_16byte_rx_desc {
 	} wb;  /* writeback */
 };
 
+/* Rx Flex Descriptor NIC Profile
+ * RxDID Profile ID 2
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID higher 16-bits
+ * Flex-field 4: reserved, VLAN ID taken from L2Tag
+ */
+struct iavf_32byte_rx_flex_wb {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	union {
+		struct {
+			__le16 rsvd;
+			__le16 flow_id_ipv6;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
 union iavf_32byte_rx_desc {
 	struct {
 		__le64  pkt_addr; /* Packet buffer address */
@@ -442,6 +483,7 @@ union iavf_32byte_rx_desc {
 			} hi_dword;
 		} qword3;
 	} wb;  /* writeback */
+	struct iavf_32byte_rx_flex_wb flex_wb;
 };
 
 #define IAVF_RXD_QW0_MIRROR_STATUS_SHIFT	8
@@ -526,6 +568,51 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
 #define IAVF_RXD_QW1_PTYPE_SHIFT	30
 #define IAVF_RXD_QW1_PTYPE_MASK		(0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
 
+/* for iavf_32byte_rx_flex_wb.ptype_flexi_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
+
+/* for iavf_32byte_rx_flex_wb.pkt_length member */
+#define IAVF_RX_FLEX_DESC_PKT_LEN_M	(0x3FFF) /* 14-bits */
+
+enum iavf_rx_flex_desc_status_error_0_bits {
+	/* Note: These are predefined bit offsets */
+	IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,
+	IAVF_RX_FLEX_DESC_STATUS0_EOF_S,
+	IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
+	IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
+	IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
+	IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
+	IAVF_RX_FLEX_DESC_STATUS0_RXE_S,
+	IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
+	IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
+	IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
+	IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
+	IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
+};
+
+enum iavf_rx_flex_desc_status_error_1_bits {
+	/* Note: These are predefined bit offsets */
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
+	/* [10:6] reserved */
+	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
+	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
+	IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
+	IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
+	IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
+	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
+};
+
 /* Packet type non-ip values */
 enum iavf_rx_l2_ptype {
 	IAVF_RX_PTYPE_L2_RESERVED			= 0,
@@ -558,7 +645,7 @@ enum iavf_rx_l2_ptype {
 };
 
 struct iavf_rx_ptype_decoded {
-	u32 ptype:8;
+	u32 ptype:10;
 	u32 known:1;
 	u32 outer_ip:1;
 	u32 outer_ip_ver:1;
@@ -721,6 +808,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
diff --git a/drivers/common/iavf/meson.build b/drivers/common/iavf/meson.build
index 1f4d8b898d..977652223b 100644
--- a/drivers/common/iavf/meson.build
+++ b/drivers/common/iavf/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2019-2020 Intel Corporation
+# Copyright(c) 2019-2021 Intel Corporation
 
 sources = files('iavf_adminq.c', 'iavf_common.c', 'iavf_impl.c')
 
diff --git a/drivers/common/iavf/siov_regs.h b/drivers/common/iavf/siov_regs.h
new file mode 100644
index 0000000000..d921269a5e
--- /dev/null
+++ b/drivers/common/iavf/siov_regs.h
@@ -0,0 +1,47 @@
+#ifndef _SIOV_REGS_H_
+#define _SIOV_REGS_H_
+#define VDEV_MBX_START			0x20000 /* Begin at 128KB */
+#define VDEV_MBX_ATQBAL			(VDEV_MBX_START + 0x0000)
+#define VDEV_MBX_ATQBAH			(VDEV_MBX_START + 0x0004)
+#define VDEV_MBX_ATQLEN			(VDEV_MBX_START + 0x0008)
+#define VDEV_MBX_ATQH			(VDEV_MBX_START + 0x000C)
+#define VDEV_MBX_ATQT			(VDEV_MBX_START + 0x0010)
+#define VDEV_MBX_ARQBAL			(VDEV_MBX_START + 0x0014)
+#define VDEV_MBX_ARQBAH			(VDEV_MBX_START + 0x0018)
+#define VDEV_MBX_ARQLEN			(VDEV_MBX_START + 0x001C)
+#define VDEV_MBX_ARQH			(VDEV_MBX_START + 0x0020)
+#define VDEV_MBX_ARQT			(VDEV_MBX_START + 0x0024)
+#define VDEV_GET_RSTAT			0x21000 /* 132KB for RSTAT */
+
+/* Begin at offset after 1MB (after 256 4k pages) */
+#define VDEV_QRX_TAIL_START       0x100000
+/* 2k Rx queues */
+#define VDEV_QRX_TAIL(_i)         (VDEV_QRX_TAIL_START + ((_i) * 0x1000))
+
+/* Begin at offset of 9MB for Rx buffer queue tail register pages */
+#define VDEV_QRX_BUFQ_TAIL_START  0x900000
+/* 2k Rx buffer queues */
+#define VDEV_QRX_BUFQ_TAIL(_i)    (VDEV_QRX_BUFQ_TAIL_START + ((_i) * 0x1000))
+
+/* Begin at offset of 17MB for 2k Tx queues */
+#define VDEV_QTX_TAIL_START       0x1100000
+/* 2k Tx queues */
+#define VDEV_QTX_TAIL(_i)         (VDEV_QTX_TAIL_START + ((_i) * 0x1000))
+
+/* Begin at offset of 25MB for 2k Tx completion queues */
+#define VDEV_QTX_COMPL_TAIL_START 0x1900000
+/* 2k Tx completion queues */
+#define VDEV_QTX_COMPL_TAIL(_i)   (VDEV_QTX_COMPL_TAIL_START + ((_i) * 0x1000))
+
+#define VDEV_INT_DYN_CTL01        0x2100000 /* Begin at offset 33MB */
+
+/* Begin at offset of 33MB + 4k to accommodate CTL01 register */
+#define VDEV_INT_DYN_START   (VDEV_INT_DYN_CTL01 + 0x1000)
+#define VDEV_INT_DYN_CTL(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000))
+#define VDEV_INT_ITR_0(_i)   (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x04)
+#define VDEV_INT_ITR_1(_i)   (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x08)
+#define VDEV_INT_ITR_2(_i)   (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x0C)
+
+/* Next offset to begin at 42MB (0x2A00000) */
+#endif /* _SIOV_REGS_H_ */
+
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 4c34d35ba7..d015a785b9 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -1,13 +1,14 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #ifndef _VIRTCHNL_H_
 #define _VIRTCHNL_H_
 
 /* Description:
- * This header file describes the VF-PF communication protocol used
- * by the drivers for all devices starting from our 40G product line
+ * This header file describes the Virtual Function (VF) - Physical Function
+ * (PF) communication protocol used by the drivers for all devices starting
+ * from our 40G product line
  *
  * Admin queue buffer usage:
  * desc->opcode is always aqc_opc_send_msg_to_pf
@@ -21,8 +22,8 @@
  * have a maximum of sixteen queues for all of its VSIs.
  *
  * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value
- * is of status_code type, defined in the shared type.h.
+ * except RESET_VF, which does not require any response. The returned value
+ * is of virtchnl_status_code type, defined here.
  *
  * In general, VF driver initialization should roughly follow the order of
  * these opcodes. The VF driver must first validate the API version of the
@@ -37,7 +38,13 @@
  * value in current and future projects
  */
 
-/* Error Codes */
+#include "virtchnl_inline_ipsec.h"
+
+/* Error Codes
+ * Note that many older versions of various iAVF drivers convert the reported
+ * status code directly into an iavf_status enumeration. For this reason, it
+ * is important that the values of these enumerations line up.
+ */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
 	VIRTCHNL_STATUS_ERR_PARAM			= -5,
@@ -84,6 +91,10 @@ enum virtchnl_rx_hsplit {
 	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP    = 8,
 };
 
+enum virtchnl_bw_limit_type {
+	VIRTCHNL_BW_SHAPER = 0,
+};
+
 #define VIRTCHNL_ETH_LENGTH_OF_ADDRESS	6
 /* END GENERIC DEFINES */
 
@@ -128,7 +139,10 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, 37 and 38 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
+	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
+	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
 	VIRTCHNL_OP_DCF_CMD_BUFF = 40,
 	VIRTCHNL_OP_DCF_DISABLE = 41,
@@ -139,14 +153,162 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DEL_RSS_CFG = 46,
 	VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
 	VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
-	VIRTCHNL_OP_QUERY_FDIR_FILTER = 49,
 	VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50,
+	VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
+	VIRTCHNL_OP_ADD_VLAN_V2 = 52,
+	VIRTCHNL_OP_DEL_VLAN_V2 = 53,
+	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
+	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
+	VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
+	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
+	VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
+	VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
+	VIRTCHNL_OP_GET_QOS_CAPS = 66,
+	VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
 	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
 	VIRTCHNL_OP_MAX,
 };
 
+static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
+{
+	switch (v_opcode) {
+	case VIRTCHNL_OP_UNKNOWN:
+		return "VIRTCHNL_OP_UNKNOWN";
+	case VIRTCHNL_OP_VERSION:
+		return "VIRTCHNL_OP_VERSION";
+	case VIRTCHNL_OP_RESET_VF:
+		return "VIRTCHNL_OP_RESET_VF";
+	case VIRTCHNL_OP_GET_VF_RESOURCES:
+		return "VIRTCHNL_OP_GET_VF_RESOURCES";
+	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+		return "VIRTCHNL_OP_CONFIG_TX_QUEUE";
+	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+		return "VIRTCHNL_OP_CONFIG_RX_QUEUE";
+	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		return "VIRTCHNL_OP_CONFIG_VSI_QUEUES";
+	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		return "VIRTCHNL_OP_CONFIG_IRQ_MAP";
+	case VIRTCHNL_OP_ENABLE_QUEUES:
+		return "VIRTCHNL_OP_ENABLE_QUEUES";
+	case VIRTCHNL_OP_DISABLE_QUEUES:
+		return "VIRTCHNL_OP_DISABLE_QUEUES";
+	case VIRTCHNL_OP_ADD_ETH_ADDR:
+		return "VIRTCHNL_OP_ADD_ETH_ADDR";
+	case VIRTCHNL_OP_DEL_ETH_ADDR:
+		return "VIRTCHNL_OP_DEL_ETH_ADDR";
+	case VIRTCHNL_OP_ADD_VLAN:
+		return "VIRTCHNL_OP_ADD_VLAN";
+	case VIRTCHNL_OP_DEL_VLAN:
+		return "VIRTCHNL_OP_DEL_VLAN";
+	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		return "VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE";
+	case VIRTCHNL_OP_GET_STATS:
+		return "VIRTCHNL_OP_GET_STATS";
+	case VIRTCHNL_OP_RSVD:
+		return "VIRTCHNL_OP_RSVD";
+	case VIRTCHNL_OP_EVENT:
+		return "VIRTCHNL_OP_EVENT";
+	case VIRTCHNL_OP_CONFIG_RSS_KEY:
+		return "VIRTCHNL_OP_CONFIG_RSS_KEY";
+	case VIRTCHNL_OP_CONFIG_RSS_LUT:
+		return "VIRTCHNL_OP_CONFIG_RSS_LUT";
+	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+		return "VIRTCHNL_OP_GET_RSS_HENA_CAPS";
+	case VIRTCHNL_OP_SET_RSS_HENA:
+		return "VIRTCHNL_OP_SET_RSS_HENA";
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING";
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING";
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		return "VIRTCHNL_OP_REQUEST_QUEUES";
+	case VIRTCHNL_OP_ENABLE_CHANNELS:
+		return "VIRTCHNL_OP_ENABLE_CHANNELS";
+	case VIRTCHNL_OP_DISABLE_CHANNELS:
+		return "VIRTCHNL_OP_DISABLE_CHANNELS";
+	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
+	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
+	case VIRTCHNL_OP_DCF_CMD_DESC:
+		return "VIRTCHNL_OP_DCF_CMD_DESC";
+	case VIRTCHNL_OP_DCF_CMD_BUFF:
+		return "VIRTCHHNL_OP_DCF_CMD_BUFF";
+	case VIRTCHNL_OP_DCF_DISABLE:
+		return "VIRTCHNL_OP_DCF_DISABLE";
+	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
+		return "VIRTCHNL_OP_DCF_GET_VSI_MAP";
+	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+		return "VIRTCHNL_OP_GET_SUPPORTED_RXDIDS";
+	case VIRTCHNL_OP_ADD_RSS_CFG:
+		return "VIRTCHNL_OP_ADD_RSS_CFG";
+	case VIRTCHNL_OP_DEL_RSS_CFG:
+		return "VIRTCHNL_OP_DEL_RSS_CFG";
+	case VIRTCHNL_OP_ADD_FDIR_FILTER:
+		return "VIRTCHNL_OP_ADD_FDIR_FILTER";
+	case VIRTCHNL_OP_DEL_FDIR_FILTER:
+		return "VIRTCHNL_OP_DEL_FDIR_FILTER";
+	case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
+		return "VIRTCHNL_OP_GET_MAX_RSS_QREGION";
+	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
+		return "VIRTCHNL_OP_ENABLE_QUEUES_V2";
+	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
+		return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
+	case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
+		return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
+	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+		return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS";
+	case VIRTCHNL_OP_ADD_VLAN_V2:
+		return "VIRTCHNL_OP_ADD_VLAN_V2";
+	case VIRTCHNL_OP_DEL_VLAN_V2:
+		return "VIRTCHNL_OP_DEL_VLAN_V2";
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2";
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2";
+	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+		return "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2";
+	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+		return "VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2";
+	case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
+		return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
+	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
+		return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
+	case VIRTCHNL_OP_MAX:
+		return "VIRTCHNL_OP_MAX";
+	default:
+		return "Unsupported (update virtchnl.h)";
+	}
+}
+
+static inline const char *virtchnl_stat_str(enum virtchnl_status_code v_status)
+{
+	switch (v_status) {
+	case VIRTCHNL_STATUS_SUCCESS:
+		return "VIRTCHNL_STATUS_SUCCESS";
+	case VIRTCHNL_STATUS_ERR_PARAM:
+		return "VIRTCHNL_STATUS_ERR_PARAM";
+	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
+		return "VIRTCHNL_STATUS_ERR_NO_MEMORY";
+	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
+		return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
+	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
+		return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
+	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
+		return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
+	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
+		return "VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR";
+	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
+		return "VIRTCHNL_STATUS_ERR_NOT_SUPPORTED";
+	default:
+		return "Unknown status code (update virtchnl.h)";
+	}
+}
+
 /* These macros are used to generate compilation errors if a structure/union
  * is not exactly the correct length. It gives a divide by zero error if the
  * structure/union is not of the correct size, otherwise it creates an enum
@@ -163,8 +325,12 @@ enum virtchnl_ops {
 
 struct virtchnl_msg {
 	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
-	enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
-	enum virtchnl_status_code v_retval;  /* ditto for desc->retval */
+
+	/* avoid confusion with desc->opcode */
+	enum virtchnl_ops v_opcode;
+
+	/* ditto for desc->retval */
+	enum virtchnl_status_code v_retval;
 	u32 vfid;			 /* used by PF when sending to VF */
 };
 
@@ -230,7 +396,9 @@ enum virtchnl_vsi_type {
 struct virtchnl_vsi_resource {
 	u16 vsi_id;
 	u16 num_queue_pairs;
-	enum virtchnl_vsi_type vsi_type;
+
+	/* see enum virtchnl_vsi_type */
+	s32 vsi_type;
 	u16 qset_handle;
 	u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
 };
@@ -241,34 +409,35 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
  * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
  * TX/RX Checksum offloading and TSO for non-tunnelled packets.
  */
-#define VIRTCHNL_VF_OFFLOAD_L2			0x00000001
-#define VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
-#define VIRTCHNL_VF_OFFLOAD_RSVD		0x00000004
-#define VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
-#define VIRTCHNL_VF_OFFLOAD_RSS_REG		0x00000010
-#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		0x00000020
-#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		0x00000040
+#define VIRTCHNL_VF_OFFLOAD_L2			BIT(0)
+#define VIRTCHNL_VF_OFFLOAD_IWARP		BIT(1)
+#define VIRTCHNL_VF_OFFLOAD_RSVD		BIT(2)
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ		BIT(3)
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG		BIT(4)
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		BIT(5)
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
-#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		0x00000080
-	/* 0X00000100 is reserved */
-#define VIRTCHNL_VF_LARGE_NUM_QPAIRS		0x00000200
-#define VIRTCHNL_VF_OFFLOAD_CRC			0x00000400
-#define VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
-#define VIRTCHNL_VF_OFFLOAD_RX_POLLING		0x00020000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	0x00040000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PF		0X00080000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP		0X00100000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		0X00200000
-#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	0X00400000
-#define VIRTCHNL_VF_OFFLOAD_ADQ			0X00800000
-#define VIRTCHNL_VF_OFFLOAD_ADQ_V2		0X01000000
-#define VIRTCHNL_VF_OFFLOAD_USO			0X02000000
-#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC	0X04000000
-#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		0X08000000
-#define VIRTCHNL_VF_OFFLOAD_FDIR_PF		0X10000000
-	/* 0X20000000 is reserved */
-#define VIRTCHNL_VF_CAP_DCF			0X40000000
-	/* 0X80000000 is reserved */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
+#define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
+#define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
+#define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	BIT(18)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF		BIT(19)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP		BIT(20)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		BIT(21)
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	BIT(22)
+#define VIRTCHNL_VF_OFFLOAD_ADQ			BIT(23)
+#define VIRTCHNL_VF_OFFLOAD_ADQ_V2		BIT(24)
+#define VIRTCHNL_VF_OFFLOAD_USO			BIT(25)
+#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC	BIT(26)
+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		BIT(27)
+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)
+#define VIRTCHNL_VF_OFFLOAD_QOS		BIT(29)
+#define VIRTCHNL_VF_CAP_DCF			BIT(30)
+	/* BIT(31) is reserved */
 
 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
 			       VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -307,6 +476,54 @@ struct virtchnl_txq_info {
 
 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
 
+/* RX descriptor IDs (range from 0 to 63) */
+enum virtchnl_rx_desc_ids {
+	VIRTCHNL_RXDID_0_16B_BASE		= 0,
+	/* 32B_BASE and FLEX_SPLITQ share desc ids as default descriptors
+	 * because they can be differentiated based on queue model; e.g. single
+	 * queue model can only use 32B_BASE and split queue model can only use
+	 * FLEX_SPLITQ.  Having these as 1 allows them to be used as default
+	 * descriptors without negotiation.
+	 */
+	VIRTCHNL_RXDID_1_32B_BASE		= 1,
+	VIRTCHNL_RXDID_1_FLEX_SPLITQ		= 1,
+	VIRTCHNL_RXDID_2_FLEX_SQ_NIC		= 2,
+	VIRTCHNL_RXDID_3_FLEX_SQ_SW		= 3,
+	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB	= 4,
+	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL	= 5,
+	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2		= 6,
+	VIRTCHNL_RXDID_7_HW_RSVD		= 7,
+	/* 8 through 15 are reserved */
+	VIRTCHNL_RXDID_16_COMMS_GENERIC		= 16,
+	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN	= 17,
+	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4	= 18,
+	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6	= 19,
+	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW	= 20,
+	VIRTCHNL_RXDID_21_COMMS_AUX_TCP		= 21,
+	/* 22 through 63 are reserved */
+};
+
+/* RX descriptor ID bitmasks */
+enum virtchnl_rx_desc_id_bitmasks {
+	VIRTCHNL_RXDID_0_16B_BASE_M		= BIT(VIRTCHNL_RXDID_0_16B_BASE),
+	VIRTCHNL_RXDID_1_32B_BASE_M		= BIT(VIRTCHNL_RXDID_1_32B_BASE),
+	VIRTCHNL_RXDID_1_FLEX_SPLITQ_M		= BIT(VIRTCHNL_RXDID_1_FLEX_SPLITQ),
+	VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M		= BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC),
+	VIRTCHNL_RXDID_3_FLEX_SQ_SW_M		= BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW),
+	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M	= BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB),
+	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M	= BIT(VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL),
+	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M	= BIT(VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2),
+	VIRTCHNL_RXDID_7_HW_RSVD_M		= BIT(VIRTCHNL_RXDID_7_HW_RSVD),
+	/* 8 through 15 are reserved */
+	VIRTCHNL_RXDID_16_COMMS_GENERIC_M	= BIT(VIRTCHNL_RXDID_16_COMMS_GENERIC),
+	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M	= BIT(VIRTCHNL_RXDID_17_COMMS_AUX_VLAN),
+	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M	= BIT(VIRTCHNL_RXDID_18_COMMS_AUX_IPV4),
+	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M	= BIT(VIRTCHNL_RXDID_19_COMMS_AUX_IPV6),
+	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M	= BIT(VIRTCHNL_RXDID_20_COMMS_AUX_FLOW),
+	VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M	= BIT(VIRTCHNL_RXDID_21_COMMS_AUX_TCP),
+	/* 22 through 63 are reserved */
+};
+
 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
  * VF sends this message to set up parameters for one RX queue.
  * External data buffer contains one instance of virtchnl_rxq_info.
@@ -329,11 +546,17 @@ struct virtchnl_rxq_info {
 	u32 databuffer_size;
 	u32 max_pkt_size;
 	u8 crc_disable;
-	/* only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported */
+	/* see enum virtchnl_rx_desc_ids;
+	 * only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported. Note
+	 * that when the offload is not supported, the descriptor format aligns
+	 * with VIRTCHNL_RXDID_1_32B_BASE.
+	 */
 	u8 rxdid;
 	u8 pad1[2];
 	u64 dma_ring_addr;
-	enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+
+	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
+	s32 rx_split_pos;
 	u32 pad2;
 };
 
@@ -536,6 +759,388 @@ struct virtchnl_vlan_filter_list {
 
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
 
+/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
+ * structures and opcodes.
+ *
+ * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
+ * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
+ * by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
+ * would OR the following bits:
+ *
+ *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * The VF would interpret this as VLAN filtering can be supported on both 0x8100
+ * and 0x88A8 VLAN ethertypes.
+ *
+ * VIRTCHNL_ETHERTYPE_XOR - Used when only a single ethertype can be supported
+ * by the PF concurrently. For example if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
+ * offload it would OR the following bits:
+ *
+ *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * The VF would interpret this as VLAN stripping can be supported on either
+ * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
+ * the previously set value.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
+ * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
+ * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
+ * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
+ *
+ * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
+ * VLAN filtering if the underlying PF supports it.
+ *
+ * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
+ * certain VLAN capability can be toggled. For example if the underlying PF/CP
+ * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
+ * set this bit along with the supported ethertypes.
+ */
+enum virtchnl_vlan_support {
+	VIRTCHNL_VLAN_UNSUPPORTED =		0,
+	VIRTCHNL_VLAN_ETHERTYPE_8100 =		0x00000001,
+	VIRTCHNL_VLAN_ETHERTYPE_88A8 =		0x00000002,
+	VIRTCHNL_VLAN_ETHERTYPE_9100 =		0x00000004,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 =	0x00000100,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 =	0x00000200,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 =	0x00000400,
+	VIRTCHNL_VLAN_PRIO =			0x01000000,
+	VIRTCHNL_VLAN_FILTER_MASK =		0x10000000,
+	VIRTCHNL_VLAN_ETHERTYPE_AND =		0x20000000,
+	VIRTCHNL_VLAN_ETHERTYPE_XOR =		0x40000000,
+	VIRTCHNL_VLAN_TOGGLE =			0x80000000
+};
+
+/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * for filtering, insertion, and stripping capabilities.
+ *
+ * If only outer capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective.
+ *
+ * If only inner capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective. Functionally this is the same as if only outer capabilities are
+ * supported. The VF driver is just forced to use the inner fields when
+ * adding/deleting filters and enabling/disabling offloads (if supported).
+ *
+ * If both outer and inner capabilities are supported (for filtering, insertion,
+ * and/or stripping) then outer refers to the outer most or single VLAN and
+ * inner refers to the second VLAN, if it exists, in the packet.
+ *
+ * There is no support for tunneled VLAN offloads, so outer or inner are never
+ * referring to a tunneled packet from the VF's perspective.
+ */
+struct virtchnl_vlan_supported_caps {
+	u32 outer;
+	u32 inner;
+};
+
+/* The PF populates these fields based on the supported VLAN filtering. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
+ * the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN filtering setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The ethertype(s) specified in the ethertype_init field are the ethertypes
+ * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
+ * most VLAN from the VF's perspective. If both inner and outer filtering are
+ * allowed then ethertype_init only refers to the outer most VLAN as only
+ * VLAN ethertype supported for inner VLAN filtering is
+ * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
+ * when both inner and outer filtering are allowed.
+ *
+ * The max_filters field tells the VF how many VLAN filters it's allowed to have
+ * at any one time. If it exceeds this amount and tries to add another filter,
+ * then the request will be rejected by the PF. To prevent failures, the VF
+ * should keep track of how many VLAN filters it has added and not attempt to
+ * add more than max_filters.
+ */
+struct virtchnl_vlan_filtering_caps {
+	struct virtchnl_vlan_supported_caps filtering_support;
+	u32 ethertype_init;
+	u16 max_filters;
+	u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
+
+/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
+ * if the PF supports a different ethertype for stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
+ * for stripping affect the ethertype(s) specified for insertion and vice versa
+ * as well. If the VF tries to configure VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
+ * that will be the ethertype for both stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
+ * stripping do not affect the ethertype(s) specified for insertion and vice
+ * versa.
+ */
+enum virtchnl_vlan_ethertype_match {
+	VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
+	VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
+};
+
+/* The PF populates these fields based on the supported VLAN offloads. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN offload setting if the
+ * VIRTCHNL_VLAN_TOGGLE_ALLOWED bit is set.
+ *
+ * The VF driver needs to be aware of how the tags are stripped by hardware and
+ * inserted by the VF driver based on the level of offload support. The PF will
+ * populate these fields based on where the VLAN tags are expected to be
+ * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
+ * interpret these fields. See the definition of the
+ * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
+ * enumeration.
+ */
+struct virtchnl_vlan_offload_caps {
+	struct virtchnl_vlan_supported_caps stripping_support;
+	struct virtchnl_vlan_supported_caps insertion_support;
+	u32 ethertype_init;
+	u8 ethertype_match;
+	u8 pad[3];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
+
+/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * VF sends this message to determine its VLAN capabilities.
+ *
+ * PF will mark which capabilities it supports based on hardware support and
+ * current configuration. For example, if a port VLAN is configured the PF will
+ * not allow outer VLAN filtering, stripping, or insertion to be configured so
+ * it will block these features from the VF.
+ *
+ * The VF will need to cross reference its capabilities with the PFs
+ * capabilities in the response message from the PF to determine the VLAN
+ * support.
+ */
+struct virtchnl_vlan_caps {
+	struct virtchnl_vlan_filtering_caps filtering;
+	struct virtchnl_vlan_offload_caps offloads;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
+
+struct virtchnl_vlan {
+	u16 tci;	/* tci[15:13] = PCP and tci[11:0] = VID */
+	u16 tci_mask;	/* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
+			 * filtering caps
+			 */
+	u16 tpid;	/* 0x8100, 0x88a8, etc. and only type(s) set in
+			 * filtering caps. Note that tpid here does not refer to
+			 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
+			 * actual 2-byte VLAN TPID
+			 */
+	u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
+
+struct virtchnl_vlan_filter {
+	struct virtchnl_vlan inner;
+	struct virtchnl_vlan outer;
+	u8 pad[16];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
+
+/* VIRTCHNL_OP_ADD_VLAN_V2
+ * VIRTCHNL_OP_DEL_VLAN_V2
+ *
+ * VF sends these messages to add/del one or more VLAN tag filters for Rx
+ * traffic.
+ *
+ * The PF attempts to add the filters and returns status.
+ *
+ * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
+ * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
+ */
+struct virtchnl_vlan_filter_list_v2 {
+	u16 vport_id;
+	u16 num_elements;
+	u8 pad[4];
+	struct virtchnl_vlan_filter filters[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);
+
+/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ *
+ * VF sends this message to enable or disable VLAN stripping or insertion. It
+ * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
+ * allowed and whether or not it's allowed to enable/disable the specific
+ * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.offloads fields to determine which offload
+ * messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable and/or disable 0x8100 inner
+ * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
+ * case means the outer most or single VLAN from the VF's perspective. This is
+ * because no outer offloads are supported. See the comments above the
+ * virtchnl_vlan_supported_caps structure for more details.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.inner =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.inner =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * In order to enable inner (again note that in this case inner is the outer
+ * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
+ * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
+ * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.inner_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * The reason that VLAN TPID(s) are not being used for the
+ * outer_ethertype_setting and inner_ethertype_setting fields is because it's
+ * possible a device could support VLAN insertion and/or stripping offload on
+ * multiple ethertypes concurrently, so this method allows a VF to request
+ * multiple ethertypes in one message using the virtchnl_vlan_support
+ * enumeration.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
+ * VLAN insertion and stripping simultaneously. The
+ * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
+ * populated based on what the PF can support.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
+ * would populate the virtchnl_vlan_setting structure in the following manner
+ * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * There is also the case where a PF and the underlying hardware can support
+ * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
+ * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
+ * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
+ * offloads. The ethertypes must match for stripping and insertion.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.ethertype_match =
+ *			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ *
+ * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
+ * populate the virtchnl_vlan_setting structure in the following manner and send
+ * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
+ * ethertype for VLAN insertion if it's enabled. So, for completeness, a
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2
+ *
+ * VF sends this message to enable or disable VLAN filtering. It also needs to
+ * specify an ethertype. The VF knows which VLAN ethertypes are allowed and
+ * whether or not it's allowed to enable/disable filtering via the
+ * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.filtering fields to determine which, if any,
+ * filtering messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.filtering in the
+ * following manner the VF will be allowed to enable/disable 0x8100 and 0x88a8
+ * outer VLAN filtering together. Note, that the VIRTCHNL_VLAN_ETHERTYPE_AND
+ * means that all filtering ethertypes will be enabled and disabled together
+ * regardless of the request from the VF. This means that the underlying
+ * hardware only supports VLAN filtering for all of the specified VLAN ethertypes
+ * or none of them.
+ *
+ * virtchnl_vlan_caps.filtering.filtering_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN filtering for 0x88a8 and 0x8100 VLANs (0x9100
+ * VLANs aren't supported by the VF driver), the VF would populate the
+ * virtchnl_vlan_setting structure in the following manner and send the
+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2. The same message format would be used
+ * to disable outer VLAN filtering for 0x88a8 and 0x8100 VLANs, but the
+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 opcode is used.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ */
+struct virtchnl_vlan_setting {
+	u32 outer_ethertype_setting;
+	u32 inner_ethertype_setting;
+	u16 vport_id;
+	u8 pad[6];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
+
 /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
  * VF sends VSI id and flags.
  * PF returns status code in retval.
@@ -706,14 +1311,26 @@ enum virtchnl_flow_type {
 struct virtchnl_filter {
 	union	virtchnl_flow_spec data;
 	union	virtchnl_flow_spec mask;
-	enum	virtchnl_flow_type flow_type;
-	enum	virtchnl_action action;
+
+	/* see enum virtchnl_flow_type */
+	s32	flow_type;
+
+	/* see enum virtchnl_action */
+	s32	action;
 	u32	action_meta;
 	u8	field_flags;
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
 
+struct virtchnl_shaper_bw {
+	/* Unit is Kbps */
+	u32 committed;
+	u32 peak;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
+
 /* VIRTCHNL_OP_DCF_GET_VSI_MAP
  * VF sends this message to get VSI mapping table.
  * PF responds with an indirect message containing VF's
@@ -754,7 +1371,71 @@ struct virtchnl_pkg_info {
 
 VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_pkg_info);
 
+/* VIRTCHNL_OP_DCF_VLAN_OFFLOAD
+ * DCF negotiates the VIRTCHNL_VF_OFFLOAD_VLAN_V2 capability firstly to get
+ * the double VLAN configuration, then DCF sends this message to configure the
+ * outer or inner VLAN offloads (insertion and strip) for the target VF.
+ */
+struct virtchnl_dcf_vlan_offload {
+	u16 vf_id;
+	u16 tpid;
+	u16 vlan_flags;
+#define VIRTCHNL_DCF_VLAN_TYPE_S		0
+#define VIRTCHNL_DCF_VLAN_TYPE_M		\
+			(0x1 << VIRTCHNL_DCF_VLAN_TYPE_S)
+#define VIRTCHNL_DCF_VLAN_TYPE_INNER		0x0
+#define VIRTCHNL_DCF_VLAN_TYPE_OUTER		0x1
+#define VIRTCHNL_DCF_VLAN_INSERT_MODE_S		1
+#define VIRTCHNL_DCF_VLAN_INSERT_MODE_M	\
+			(0x7 << VIRTCHNL_DCF_VLAN_INSERT_MODE_S)
+#define VIRTCHNL_DCF_VLAN_INSERT_DISABLE	0x1
+#define VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED	0x2
+#define VIRTCHNL_DCF_VLAN_INSERT_VIA_TX_DESC	0x3
+#define VIRTCHNL_DCF_VLAN_STRIP_MODE_S		4
+#define VIRTCHNL_DCF_VLAN_STRIP_MODE_M		\
+			(0x7 << VIRTCHNL_DCF_VLAN_STRIP_MODE_S)
+#define VIRTCHNL_DCF_VLAN_STRIP_DISABLE		0x1
+#define VIRTCHNL_DCF_VLAN_STRIP_ONLY		0x2
+#define VIRTCHNL_DCF_VLAN_STRIP_INTO_RX_DESC	0x3
+	u16 vlan_id;
+	u16 pad[4];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_dcf_vlan_offload);
+
+struct virtchnl_dcf_bw_cfg {
+	u8 tc_num;
+#define VIRTCHNL_DCF_BW_CIR		BIT(0)
+#define VIRTCHNL_DCF_BW_PIR		BIT(1)
+	u8 bw_type;
+	u8 pad[2];
+	enum virtchnl_bw_limit_type type;
+	union {
+		struct virtchnl_shaper_bw shaper;
+		u8 pad2[32];
+	};
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_dcf_bw_cfg);
+
+/* VIRTCHNL_OP_DCF_CONFIG_BW
+ * VF send this message to set the bandwidth configuration of each
+ * TC with a specific vf id. The flag node_type is to indicate that
+ * this message is to configure VSI node or TC node bandwidth.
+ */
+struct virtchnl_dcf_bw_cfg_list {
+	u16 vf_id;
+	u8 num_elem;
+#define VIRTCHNL_DCF_TARGET_TC_BW	0
+#define VIRTCHNL_DCF_TARGET_VF_BW	1
+	u8 node_type;
+	struct virtchnl_dcf_bw_cfg cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_dcf_bw_cfg_list);
+
 struct virtchnl_supported_rxdids {
+	/* see enum virtchnl_rx_desc_id_bitmasks */
 	u64 supported_rxdids;
 };
 
@@ -779,7 +1460,8 @@ enum virtchnl_event_codes {
 #define PF_EVENT_SEVERITY_CERTAIN_DOOM	255
 
 struct virtchnl_pf_event {
-	enum virtchnl_event_codes event;
+	/* see enum virtchnl_event_codes */
+	s32 event;
 	union {
 		/* If the PF driver does not support the new speed reporting
 		 * capabilities then use link_event else use link_event_adv to
@@ -890,6 +1572,16 @@ enum virtchnl_proto_hdr_type {
 	VIRTCHNL_PROTO_HDR_AH,
 	VIRTCHNL_PROTO_HDR_PFCP,
 	VIRTCHNL_PROTO_HDR_GTPC,
+	VIRTCHNL_PROTO_HDR_ECPRI,
+	VIRTCHNL_PROTO_HDR_L2TPV2,
+	VIRTCHNL_PROTO_HDR_PPP,
+	/* IPv4 and IPv6 Fragment header types are only associated to
+	 * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively,
+	 * cannot be used independently.
+	 */
+	VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
+	VIRTCHNL_PROTO_HDR_GRE,
 };
 
 /* Protocol header field within a protocol header. */
@@ -912,6 +1604,7 @@ enum virtchnl_proto_hdr_field {
 	VIRTCHNL_PROTO_HDR_IPV4_DSCP,
 	VIRTCHNL_PROTO_HDR_IPV4_TTL,
 	VIRTCHNL_PROTO_HDR_IPV4_PROT,
+	VIRTCHNL_PROTO_HDR_IPV4_CHKSUM,
 	/* IPV6 */
 	VIRTCHNL_PROTO_HDR_IPV6_SRC =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
@@ -936,14 +1629,17 @@ enum virtchnl_proto_hdr_field {
 	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
 	VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+	VIRTCHNL_PROTO_HDR_TCP_CHKSUM,
 	/* UDP */
 	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
 	VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+	VIRTCHNL_PROTO_HDR_UDP_CHKSUM,
 	/* SCTP */
 	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
 	VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+	VIRTCHNL_PROTO_HDR_SCTP_CHKSUM,
 	/* GTPU_IP */
 	VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
@@ -970,10 +1666,30 @@ enum virtchnl_proto_hdr_field {
 	/* GTPC */
 	VIRTCHNL_PROTO_HDR_GTPC_TEID =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
+	/* ECPRI */
+	VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),
+	VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,
+	/* IPv4 Dummy Fragment */
+	VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
+	/* IPv6 Extension Fragment */
+	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
+	/* GTPU_DWN/UP */
+	VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
+	VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
+	/* L2TPv2 */
+	VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),
+	VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,
 };
 
 struct virtchnl_proto_hdr {
-	enum virtchnl_proto_hdr_type type;
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
 	u32 field_selector; /* a bit mask to select field for header type */
 	u8 buffer[64];
 	/**
@@ -1002,7 +1718,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
 
 struct virtchnl_rss_cfg {
 	struct virtchnl_proto_hdrs proto_hdrs;	   /* protocol headers */
-	enum virtchnl_rss_algorithm rss_algorithm; /* rss algorithm type */
+
+	/* see enum virtchnl_rss_algorithm; rss algorithm type */
+	s32 rss_algorithm;
 	u8 reserved[128];                          /* reserve for future */
 };
 
@@ -1010,7 +1728,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
 
 /* action configuration for FDIR */
 struct virtchnl_filter_action {
-	enum virtchnl_action type;
+	/* see enum virtchnl_action type */
+	s32 type;
 	union {
 		/* used for queue and qgroup action */
 		struct {
@@ -1049,20 +1768,6 @@ struct virtchnl_fdir_rule {
 
 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
 
-/* query information to retrieve fdir rule counters.
- * PF will fill out this structure to reset counter.
- */
-struct virtchnl_fdir_query_info {
-	u32 match_packets_valid:1;
-	u32 match_bytes_valid:1;
-	u32 reserved:30;  /* Reserved, must be zero. */
-	u32 pad;
-	u64 matched_packets; /* Number of packets for this rule. */
-	u64 matched_bytes;   /* Number of bytes through this rule. */
-};
-
-VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_fdir_query_info);
-
 /* Status returned to VF after VF requests FDIR commands
  * VIRTCHNL_FDIR_SUCCESS
  * VF FDIR related request is successfully done by PF
@@ -1117,7 +1822,9 @@ struct virtchnl_fdir_add {
 	u16 validate_only; /* INPUT */
 	u32 flow_id;       /* OUTPUT */
 	struct virtchnl_fdir_rule rule_cfg; /* INPUT */
-	enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
+	s32 status;
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
@@ -1130,27 +1837,69 @@ struct virtchnl_fdir_del {
 	u16 vsi_id;  /* INPUT */
 	u16 pad;
 	u32 flow_id; /* INPUT */
-	enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
+	s32 status;
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
 
-/* VIRTCHNL_OP_QUERY_FDIR_FILTER
- * VF sends this request to PF by filling out vsi_id,
- * flow_id and reset_counter. PF will return query_info
- * and query_status to VF.
+/* VIRTCHNL_OP_GET_QOS_CAPS
+ * VF sends this message to get its QoS Caps, such as
+ * TC number, Arbiter and Bandwidth.
  */
-struct virtchnl_fdir_query {
-	u16 vsi_id;   /* INPUT */
-	u16 pad1[3];
-	u32 flow_id;  /* INPUT */
-	u32 reset_counter:1; /* INPUT */
-	struct virtchnl_fdir_query_info query_info; /* OUTPUT */
-	enum virtchnl_fdir_prgm_status status;  /* OUTPUT */
-	u32 pad2;
+struct virtchnl_qos_cap_elem {
+	u8 tc_num;
+	u8 tc_prio;
+#define VIRTCHNL_ABITER_STRICT      0
+#define VIRTCHNL_ABITER_ETS         2
+	u8 arbiter;
+#define VIRTCHNL_STRICT_WEIGHT      1
+	u8 weight;
+	enum virtchnl_bw_limit_type type;
+	union {
+		struct virtchnl_shaper_bw shaper;
+		u8 pad2[32];
+	};
 };
 
-VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_fdir_query);
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
+
+struct virtchnl_qos_cap_list {
+	u16 vsi_id;
+	u16 num_elem;
+	struct virtchnl_qos_cap_elem cap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_qos_cap_list);
+
+/* VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP
+ * VF sends message virtchnl_queue_tc_mapping to set queue to tc
+ * mapping for all the Tx and Rx queues with a specified VSI, and
+ * would get response about bitmap of valid user priorities
+ * associated with queues.
+ */
+struct virtchnl_queue_tc_mapping {
+	u16 vsi_id;
+	u16 num_tc;
+	u16 num_queue_pairs;
+	u8 pad[2];
+	union {
+		struct {
+			u16 start_queue_id;
+			u16 queue_count;
+		} req;
+		struct {
+#define VIRTCHNL_USER_PRIO_TYPE_UP	0
+#define VIRTCHNL_USER_PRIO_TYPE_DSCP	1
+			u16 prio_type;
+			u16 valid_prio_bitmap;
+		} resp;
+	} tc[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
+
 
 /* TX and RX queue types are valid in legacy as well as split queue models.
  * With Split Queue model, 2 additional types are introduced - TX_COMPLETION
@@ -1169,7 +1918,8 @@ enum virtchnl_queue_type {
 
 /* structure to specify a chunk of contiguous queues */
 struct virtchnl_queue_chunk {
-	enum virtchnl_queue_type type;
+	/* see enum virtchnl_queue_type */
+	s32 type;
 	u16 start_queue_id;
 	u16 num_queues;
 };
@@ -1222,8 +1972,12 @@ struct virtchnl_queue_vector {
 	u16 queue_id;
 	u16 vector_id;
 	u8 pad[4];
-	enum virtchnl_itr_idx itr_idx;
-	enum virtchnl_queue_type queue_type;
+
+	/* see enum virtchnl_itr_idx */
+	s32 itr_idx;
+
+	/* see enum virtchnl_queue_type */
+	s32 queue_type;
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);
@@ -1286,6 +2040,10 @@ enum virtchnl_vector_limits {
 	VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX	=
 		((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) /
 		sizeof(struct virtchnl_queue_vector),
+
+	VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list_v2)) /
+		sizeof(struct virtchnl_vlan_filter),
 };
 
 /**
@@ -1460,6 +2218,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		valid_len = sizeof(struct virtchnl_filter);
 		break;
+	case VIRTCHNL_OP_DCF_VLAN_OFFLOAD:
+		valid_len = sizeof(struct virtchnl_dcf_vlan_offload);
+		break;
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
 		/* These two opcodes are specific to handle the AdminQ command,
@@ -1471,6 +2232,19 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
 	case VIRTCHNL_OP_DCF_GET_PKG_INFO:
 		break;
+	case VIRTCHNL_OP_DCF_CONFIG_BW:
+		valid_len = sizeof(struct virtchnl_dcf_bw_cfg_list);
+		if (msglen >= valid_len) {
+			struct virtchnl_dcf_bw_cfg_list *cfg_list =
+				(struct virtchnl_dcf_bw_cfg_list *)msg;
+			if (cfg_list->num_elem == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (cfg_list->num_elem - 1) *
+					 sizeof(struct virtchnl_dcf_bw_cfg);
+		}
+		break;
 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
 		break;
 	case VIRTCHNL_OP_ADD_RSS_CFG:
@@ -1483,8 +2257,47 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
 		valid_len = sizeof(struct virtchnl_fdir_del);
 		break;
-	case VIRTCHNL_OP_QUERY_FDIR_FILTER:
-		valid_len = sizeof(struct virtchnl_fdir_query);
+	case VIRTCHNL_OP_GET_QOS_CAPS:
+		break;
+	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
+		valid_len = sizeof(struct virtchnl_queue_tc_mapping);
+		if (msglen >= valid_len) {
+			struct virtchnl_queue_tc_mapping *q_tc =
+				(struct virtchnl_queue_tc_mapping *)msg;
+			if (q_tc->num_tc == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_tc->num_tc - 1) *
+					 sizeof(q_tc->tc[0]);
+		}
+		break;
+	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+		break;
+	case VIRTCHNL_OP_ADD_VLAN_V2:
+	case VIRTCHNL_OP_DEL_VLAN_V2:
+		valid_len = sizeof(struct virtchnl_vlan_filter_list_v2);
+		if (msglen >= valid_len) {
+			struct virtchnl_vlan_filter_list_v2 *vfl =
+			    (struct virtchnl_vlan_filter_list_v2 *)msg;
+
+			if (vfl->num_elements == 0 || vfl->num_elements >
+			    VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += (vfl->num_elements - 1) *
+				sizeof(struct virtchnl_vlan_filter);
+		}
+		break;
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+	case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
+	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
+		valid_len = sizeof(struct virtchnl_vlan_setting);
 		break;
 	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
 	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
@@ -1515,6 +2328,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..2f4bf15725
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF create SA as configuration and PF driver will return
+ * a unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+
+	/* 0 for NAT-T unsupported, 1 for NAT-T supported */
+	u8 is_udp;
+
+	/* reserved */
+	u8 reserved;
+
+	/* NAT-T UDP port number. Only valid in case NAT-T supported */
+	u16 udp_port;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
diff --git a/drivers/net/i40e/base/README b/drivers/net/i40e/base/README
index 6af2993116..b1da53db2b 100644
--- a/drivers/net/i40e/base/README
+++ b/drivers/net/i40e/base/README
@@ -6,7 +6,7 @@ Intel® I40E driver
 ==================
 
 This directory contains source code of FreeBSD i40e driver of version
-cid-i40e.2020.08.27.tar.gz released by the team which develops
+cid-i40e.2022.03.08.tar.gz released by the team which develops
 basic drivers for any i40e NIC. The directory of base/ contains the
 original source package.
 This driver is valid for the product(s) listed below
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index c63a38e900..27c82d9b44 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -840,7 +840,7 @@ STATIC bool i40e_asq_done(struct i40e_hw *hw)
 }
 
 /**
- *  i40e_asq_send_command - send command to Admin Queue
+ *  i40e_asq_send_command_exec - send command to Admin Queue
  *  @hw: pointer to the hw struct
  *  @desc: prefilled descriptor describing the command (non DMA mem)
  *  @buff: buffer to use for indirect commands
@@ -850,11 +850,12 @@ STATIC bool i40e_asq_done(struct i40e_hw *hw)
  *  This is the main send command driver routine for the Admin Queue send
  *  queue.  It runs the queue, cleans the queue, etc
  **/
-enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
-				struct i40e_aq_desc *desc,
-				void *buff, /* can be NULL */
-				u16  buff_size,
-				struct i40e_asq_cmd_details *cmd_details)
+STATIC enum i40e_status_code
+i40e_asq_send_command_exec(struct i40e_hw *hw,
+			   struct i40e_aq_desc *desc,
+			   void *buff, /* can be NULL */
+			   u16  buff_size,
+			   struct i40e_asq_cmd_details *cmd_details)
 {
 	enum i40e_status_code status = I40E_SUCCESS;
 	struct i40e_dma_mem *dma_buff = NULL;
@@ -864,8 +865,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
 	u16  retval = 0;
 	u32  val = 0;
 
-	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
-
 	hw->aq.asq_last_status = I40E_AQ_RC_OK;
 
 	if (hw->aq.asq.count == 0) {
@@ -1048,6 +1047,64 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
 	}
 
 asq_send_command_error:
+	return status;
+}
+
+/**
+ *  i40e_asq_send_command - send command to Admin Queue
+ *  @hw: pointer to the hw struct
+ *  @desc: prefilled descriptor describing the command (non DMA mem)
+ *  @buff: buffer to use for indirect commands
+ *  @buff_size: size of buffer for indirect commands
+ *  @cmd_details: pointer to command details structure
+ *
+ *  Acquires the lock and calls the main send command execution
+ *  routine.
+ **/
+enum i40e_status_code
+i40e_asq_send_command(struct i40e_hw *hw,
+		      struct i40e_aq_desc *desc,
+		      void *buff, /* can be NULL */
+		      u16  buff_size,
+		      struct i40e_asq_cmd_details *cmd_details)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+	status = i40e_asq_send_command_exec(hw, desc, buff, buff_size,
+					    cmd_details);
+	i40e_release_spinlock(&hw->aq.asq_spinlock);
+	return status;
+}
+
+/**
+ *  i40e_asq_send_command_v2 - send command to Admin Queue
+ *  @hw: pointer to the hw struct
+ *  @desc: prefilled descriptor describing the command (non DMA mem)
+ *  @buff: buffer to use for indirect commands
+ *  @buff_size: size of buffer for indirect commands
+ *  @cmd_details: pointer to command details structure
+ *  @aq_status: pointer to Admin Queue status return value
+ *
+ *  Acquires the lock and calls the main send command execution
+ *  routine. Returns the last Admin Queue status in aq_status
+ *  to avoid race conditions in access to hw->aq.asq_last_status.
+ **/
+enum i40e_status_code
+i40e_asq_send_command_v2(struct i40e_hw *hw,
+			 struct i40e_aq_desc *desc,
+			 void *buff, /* can be NULL */
+			 u16  buff_size,
+			 struct i40e_asq_cmd_details *cmd_details,
+			 enum i40e_admin_queue_err *aq_status)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+	status = i40e_asq_send_command_exec(hw, desc, buff, buff_size,
+					    cmd_details);
+	if (aq_status)
+		*aq_status = hw->aq.asq_last_status;
 	i40e_release_spinlock(&hw->aq.asq_spinlock);
 	return status;
 }
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index 4d80568050..def307b59d 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -12,8 +12,8 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR_X722	0x000B
-#define I40E_FW_API_VERSION_MINOR_X710	0x000C
+#define I40E_FW_API_VERSION_MINOR_X722	0x000C
+#define I40E_FW_API_VERSION_MINOR_X710	0x000F
 
 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
 					I40E_FW_API_VERSION_MINOR_X710 : \
@@ -768,6 +768,7 @@ struct i40e_aqc_set_switch_config {
 #define I40E_AQ_SET_SWITCH_CFG_PROMISC		0x0001
 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER	0x0002
 #define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT	0x0004
+#define I40E_AQ_SET_SWITCH_CFG_OUTER_VLAN	0x0008
 	__le16	valid_flags;
 	/* The ethertype in switch_tag is dropped on ingress and used
 	 * internally by the switch. Set this to zero for the default
@@ -904,7 +905,7 @@ struct i40e_aqc_vsi_properties_data {
 	u8	sec_reserved;
 	/* VLAN section */
 	__le16	pvid; /* VLANS include priority bits */
-	__le16	fcoe_pvid;
+	__le16	outer_vlan;
 	u8	port_vlan_flags;
 #define I40E_AQ_VSI_PVLAN_MODE_SHIFT	0x00
 #define I40E_AQ_VSI_PVLAN_MODE_MASK	(0x03 << \
@@ -920,7 +921,24 @@ struct i40e_aqc_vsi_properties_data {
 #define I40E_AQ_VSI_PVLAN_EMOD_STR_UP	0x08
 #define I40E_AQ_VSI_PVLAN_EMOD_STR	0x10
 #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING	0x18
-	u8	pvlan_reserved[3];
+	u8	outer_vlan_flags;
+#define I40E_AQ_VSI_OVLAN_MODE_SHIFT	0x00
+#define I40E_AQ_VSI_OVLAN_MODE_MASK	(0x03 << \
+					 I40E_AQ_VSI_OVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_OVLAN_MODE_UNTAGGED	0x01
+#define I40E_AQ_VSI_OVLAN_MODE_TAGGED	0x02
+#define I40E_AQ_VSI_OVLAN_MODE_ALL	0x03
+#define I40E_AQ_VSI_OVLAN_INSERT_PVID	0x04
+#define I40E_AQ_VSI_OVLAN_EMOD_SHIFT	0x03
+#define I40E_AQ_VSI_OVLAN_EMOD_MASK	(0x03 <<\
+					 I40E_AQ_VSI_OVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_OVLAN_EMOD_SHOW_ALL	0x00
+#define I40E_AQ_VSI_OVLAN_EMOD_SHOW_UP	0x01
+#define I40E_AQ_VSI_OVLAN_EMOD_HIDE_ALL	0x02
+#define I40E_AQ_VSI_OVLAN_EMOD_NOTHING	0x03
+#define I40E_AQ_VSI_OVLAN_CTRL_ENA	0x04
+
+	u8	pvlan_reserved[2];
 	/* ingress egress up sections */
 	__le32	ingress_table; /* bitmap, 3 bits per up */
 #define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT	0
@@ -2017,6 +2035,15 @@ enum i40e_aq_link_speed {
 	I40E_LINK_SPEED_25GB	= (1 << I40E_LINK_SPEED_25GB_SHIFT),
 };
 
+enum i40e_prt_mac_pcs_link_speed {
+	I40E_PRT_MAC_PCS_LINK_SPEED_UNKNOWN = 0,
+	I40E_PRT_MAC_PCS_LINK_SPEED_100MB,
+	I40E_PRT_MAC_PCS_LINK_SPEED_1GB,
+	I40E_PRT_MAC_PCS_LINK_SPEED_10GB,
+	I40E_PRT_MAC_PCS_LINK_SPEED_40GB,
+	I40E_PRT_MAC_PCS_LINK_SPEED_20GB
+};
+
 struct i40e_aqc_module_desc {
 	u8 oui[3];
 	u8 reserved1;
@@ -2427,11 +2454,15 @@ struct i40e_aqc_rollback_revision_update {
 	u8	optin_mode; /* bool */
 #define I40E_AQ_RREV_OPTION_MODE			0x01
 	u8	module_selected;
-#define I40E_AQ_RREV_MODULE_PCIE_ANALOG		0
-#define I40E_AQ_RREV_MODULE_PHY_ANALOG		1
-#define I40E_AQ_RREV_MODULE_OPTION_ROM		2
-#define I40E_AQ_RREV_MODULE_EMP_IMAGE		3
-#define I40E_AQ_RREV_MODULE_PE_IMAGE		4
+#define I40E_AQ_RREV_MODULE_PCIE_ANALOG			0
+#define I40E_AQ_RREV_MODULE_PHY_ANALOG			1
+#define I40E_AQ_RREV_MODULE_OPTION_ROM			2
+#define I40E_AQ_RREV_MODULE_EMP_IMAGE			3
+#define I40E_AQ_RREV_MODULE_PE_IMAGE			4
+#define I40E_AQ_RREV_MODULE_PHY_PLL_O_CONFIGURATION	5
+#define I40E_AQ_RREV_MODULE_PHY_0_CONFIGURATION		6
+#define I40E_AQ_RREV_MODULE_PHY_PLL_1_CONFIGURATION	7
+#define I40E_AQ_RREV_MODULE_PHY_1_CONFIGURATION		8
 	u8	reserved1[2];
 	u32	min_rrev;
 	u8	reserved2[8];
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index f11d25d0d8..122a3782a4 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -37,6 +37,7 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
 		case I40E_DEV_ID_10G_B:
 		case I40E_DEV_ID_10G_SFP:
 		case I40E_DEV_ID_5G_BASE_T_BC:
+		case I40E_DEV_ID_1G_BASE_T_BC:
 		case I40E_DEV_ID_20G_KR2:
 		case I40E_DEV_ID_20G_KR2_A:
 		case I40E_DEV_ID_25G_B:
@@ -54,6 +55,7 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
 		case I40E_DEV_ID_1G_BASE_T_X722:
 		case I40E_DEV_ID_10G_BASE_T_X722:
 		case I40E_DEV_ID_SFP_I_X722:
+		case I40E_DEV_ID_SFP_X722_A:
 			hw->mac.type = I40E_MAC_X722;
 			break;
 #if defined(INTEGRATED_VF) || defined(VF_DRIVER)
@@ -176,8 +178,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
 		return "I40E_ERR_INVALID_MAC_ADDR";
 	case I40E_ERR_DEVICE_NOT_SUPPORTED:
 		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
-	case I40E_ERR_MASTER_REQUESTS_PENDING:
-		return "I40E_ERR_MASTER_REQUESTS_PENDING";
+	case I40E_ERR_PRIMARY_REQUESTS_PENDING:
+		return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
 	case I40E_ERR_INVALID_LINK_SETTINGS:
 		return "I40E_ERR_INVALID_LINK_SETTINGS";
 	case I40E_ERR_AUTONEG_NOT_COMPLETE:
@@ -1624,6 +1626,35 @@ u32 i40e_led_get(struct i40e_hw *hw)
 	return mode;
 }
 
+/**
+ * i40e_led_get_blink - return current LED blink setting
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the LED_BLINK bit as defined in the
+ * GPIO register definitions (0 = no blink, 1 = do blink).
+ **/
+bool i40e_led_get_blink(struct i40e_hw *hw)
+{
+	bool blink = 0;
+	int i;
+
+	/* as per the documentation GPIO 22-29 are the LED
+	 * GPIO pins named LED0..LED7
+	 */
+	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+		u32 gpio_val = i40e_led_is_mine(hw, i);
+
+		if (!gpio_val)
+			continue;
+
+		blink = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK) >>
+			I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+		break;
+	}
+
+	return blink;
+}
+
 /**
  * i40e_led_set - set new on/off mode
  * @hw: pointer to the hw struct
@@ -3120,6 +3151,46 @@ enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
 	return status;
 }
 
+/**
+ * i40e_prepare_add_macvlan
+ * @mv_list: list of macvlans to be added
+ * @desc: pointer to AQ descriptor structure
+ * @count: length of the list
+ * @seid: VSI for the mac address
+ *
+ * Internal helper function that prepares the add macvlan request
+ * and returns the buffer size.
+ **/
+static u16
+i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
+			 struct i40e_aq_desc *desc, u16 count, u16 seid)
+{
+	struct i40e_aqc_macvlan *cmd =
+		(struct i40e_aqc_macvlan *)&desc->params.raw;
+	u16 buf_size;
+	int i;
+
+	buf_size = count * sizeof(*mv_list);
+
+	/* prep the rest of the request */
+	i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
+	cmd->num_addresses = CPU_TO_LE16(count);
+	cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+	cmd->seid[1] = 0;
+	cmd->seid[2] = 0;
+
+	for (i = 0; i < count; i++)
+		if (I40E_IS_MULTICAST(mv_list[i].mac_addr))
+			mv_list[i].flags |=
+			    CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
+
+	desc->flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc->flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	return buf_size;
+}
+
 /**
  * i40e_aq_add_macvlan
  * @hw: pointer to the hw struct
@@ -3130,8 +3201,74 @@ enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
  *
  * Add MAC/VLAN addresses to the HW filtering
  **/
-enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
-			struct i40e_aqc_add_macvlan_element_data *mv_list,
+enum i40e_status_code
+i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+		    struct i40e_aqc_add_macvlan_element_data *mv_list,
+		    u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	enum i40e_status_code status;
+	u16 buf_size;
+
+	if (count == 0 || !mv_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_add_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Add MAC/VLAN addresses to the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+enum i40e_status_code
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+		       struct i40e_aqc_add_macvlan_element_data *mv_list,
+		       u16 count, struct i40e_asq_cmd_details *cmd_details,
+		       enum i40e_admin_queue_err *aq_status)
+{
+	struct i40e_aq_desc desc;
+	enum i40e_status_code status;
+	u16 buf_size;
+
+	if (count == 0 || !mv_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+	status = i40e_asq_send_command_v2(hw, &desc, mv_list, buf_size,
+					  cmd_details, aq_status);
+
+	return status;
+}
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_remove_macvlan_element_data *mv_list,
 			u16 count, struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
@@ -3139,7 +3276,6 @@ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
 		(struct i40e_aqc_macvlan *)&desc.params.raw;
 	enum i40e_status_code status;
 	u16 buf_size;
-	int i;
 
 	if (count == 0 || !mv_list || !hw)
 		return I40E_ERR_PARAM;
@@ -3147,17 +3283,12 @@ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
 	buf_size = count * sizeof(*mv_list);
 
 	/* prep the rest of the request */
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
 	cmd->num_addresses = CPU_TO_LE16(count);
 	cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
 	cmd->seid[1] = 0;
 	cmd->seid[2] = 0;
 
-	for (i = 0; i < count; i++)
-		if (I40E_IS_MULTICAST(mv_list[i].mac_addr))
-			mv_list[i].flags |=
-			    CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
-
 	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
 	if (buf_size > I40E_AQ_LARGE_BUF)
 		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
@@ -3169,18 +3300,25 @@ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
 }
 
 /**
- * i40e_aq_remove_macvlan
+ * i40e_aq_remove_macvlan_v2
  * @hw: pointer to the hw struct
  * @seid: VSI for the mac address
  * @mv_list: list of macvlans to be removed
  * @count: length of the list
  * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
  *
- * Remove MAC/VLAN addresses from the HW filtering
+ * Remove MAC/VLAN addresses from the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
  **/
-enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
-			struct i40e_aqc_remove_macvlan_element_data *mv_list,
-			u16 count, struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+			  struct i40e_aqc_remove_macvlan_element_data *mv_list,
+			  u16 count, struct i40e_asq_cmd_details *cmd_details,
+			  enum i40e_admin_queue_err *aq_status)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_macvlan *cmd =
@@ -3204,8 +3342,8 @@ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
 	if (buf_size > I40E_AQ_LARGE_BUF)
 		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
-	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
-				       cmd_details);
+	status = i40e_asq_send_command_v2(hw, &desc, mv_list, buf_size,
+					  cmd_details, aq_status);
 
 	return status;
 }
@@ -5554,7 +5692,6 @@ STATIC enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
 				struct i40e_filter_control_settings *settings)
 {
 	u32 fcoe_cntx_size, fcoe_filt_size;
-	u32 pe_cntx_size, pe_filt_size;
 	u32 fcoe_fmax;
 
 	u32 val;
@@ -5599,8 +5736,6 @@ STATIC enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
 	case I40E_HASH_FILTER_SIZE_256K:
 	case I40E_HASH_FILTER_SIZE_512K:
 	case I40E_HASH_FILTER_SIZE_1M:
-		pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
-		pe_filt_size <<= (u32)settings->pe_filt_num;
 		break;
 	default:
 		return I40E_ERR_PARAM;
@@ -5617,8 +5752,6 @@ STATIC enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
 	case I40E_DMA_CNTX_SIZE_64K:
 	case I40E_DMA_CNTX_SIZE_128K:
 	case I40E_DMA_CNTX_SIZE_256K:
-		pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
-		pe_cntx_size <<= (u32)settings->pe_cntx_num;
 		break;
 	default:
 		return I40E_ERR_PARAM;
@@ -6803,6 +6936,7 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
 	case I40E_DEV_ID_10G_BASE_T4:
 	case I40E_DEV_ID_10G_BASE_T_BC:
 	case I40E_DEV_ID_5G_BASE_T_BC:
+	case I40E_DEV_ID_1G_BASE_T_BC:
 	case I40E_DEV_ID_10G_BASE_T_X722:
 	case I40E_DEV_ID_25G_B:
 	case I40E_DEV_ID_25G_SFP28:
@@ -6839,7 +6973,9 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
 		break;
 	case I40E_DEV_ID_10G_BASE_T:
 	case I40E_DEV_ID_10G_BASE_T4:
+	case I40E_DEV_ID_10G_BASE_T_BC:
 	case I40E_DEV_ID_5G_BASE_T_BC:
+	case I40E_DEV_ID_1G_BASE_T_BC:
 	case I40E_DEV_ID_10G_BASE_T_X722:
 	case I40E_DEV_ID_25G_B:
 	case I40E_DEV_ID_25G_SFP28:
@@ -8091,7 +8227,8 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 	u32 sec_off;
 	u32 i;
 
-	if (track_id == I40E_DDP_TRACKID_INVALID) {
+	if (track_id == I40E_DDP_TRACKID_INVALID ||
+	    track_id == I40E_DDP_TRACKID_RDONLY) {
 		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
 		return I40E_NOT_SUPPORTED;
 	}
diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c
index 46add19c9f..8f9b7e823f 100644
--- a/drivers/net/i40e/base/i40e_dcb.c
+++ b/drivers/net/i40e/base/i40e_dcb.c
@@ -315,9 +315,15 @@ static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
 	 *        |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
 	 *        ---------------------------------
 	 */
-	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		etscfg->tcbwtable[i] = buf[offset++];
 
+		if (etscfg->prioritytable[i] == I40E_CEE_PGID_STRICT)
+			dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+		else
+			dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+	}
+
 	/* Number of TCs supported (1 octet) */
 	etscfg->maxtcs = buf[offset];
 }
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index 02ae7be550..be261423b8 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -26,6 +26,7 @@
 #define I40E_DEV_ID_XXV710_N3000	0x0D58
 #define I40E_DEV_ID_10G_BASE_T_BC	0x15FF
 #define I40E_DEV_ID_5G_BASE_T_BC	0x101F
+#define I40E_DEV_ID_1G_BASE_T_BC	0x0DD2
 #if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
 #define I40E_DEV_ID_VF			0x154C
 #define I40E_DEV_ID_VF_HV		0x1571
@@ -48,6 +49,7 @@
 #define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
 #define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
 #define I40E_DEV_ID_SFP_I_X722		0x37D3
+#define I40E_DEV_ID_SFP_X722_A		0x0DDA
 #if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
 #define I40E_DEV_ID_X722_VF		0x37CD
 #endif /* VF_DRIVER */
diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h
index c9287ff255..437fb03f4d 100644
--- a/drivers/net/i40e/base/i40e_osdep.h
+++ b/drivers/net/i40e/base/i40e_osdep.h
@@ -165,7 +165,7 @@ static inline uint64_t i40e_read64_addr(volatile void *addr)
 	I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((a), (reg)), (value))
 #define flush(a) i40e_read_addr(I40E_PCI_REG_ADDR((a), (I40E_GLGEN_STAT)))
 
-#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
 
 /* memory allocation tracking */
 struct i40e_dma_mem {
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index 124222e476..8c21ac71ab 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -38,6 +38,13 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
 				void *buff, /* can be NULL */
 				u16  buff_size,
 				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_asq_send_command_v2(struct i40e_hw *hw,
+			 struct i40e_aq_desc *desc,
+			 void *buff, /* can be NULL */
+			 u16  buff_size,
+			 struct i40e_asq_cmd_details *cmd_details,
+			 enum i40e_admin_queue_err *aq_status);
 #ifdef VF_DRIVER
 bool i40e_asq_done(struct i40e_hw *hw);
 #endif
@@ -66,6 +73,7 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
 #ifdef PF_DRIVER
 
 u32 i40e_led_get(struct i40e_hw *hw);
+bool i40e_led_get_blink(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
 enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
 				       u16 led_addr, u32 mode);
@@ -188,9 +196,19 @@ enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
 enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
 			struct i40e_aqc_add_macvlan_element_data *mv_list,
 			u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+		       struct i40e_aqc_add_macvlan_element_data *mv_list,
+		       u16 count, struct i40e_asq_cmd_details *cmd_details,
+		       enum i40e_admin_queue_err *aq_status);
 enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
 			struct i40e_aqc_remove_macvlan_element_data *mv_list,
 			u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+			  struct i40e_aqc_remove_macvlan_element_data *mv_list,
+			  u16 count, struct i40e_asq_cmd_details *cmd_details,
+			  enum i40e_admin_queue_err *aq_status);
 enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
 			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
 			struct i40e_asq_cmd_details *cmd_details,
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index ee4f333f9c..651b0230f7 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -1411,6 +1411,11 @@
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+/* _i=0...3 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_LINK_STATUS1(_i) (0x0008C200 + ((_i) * 4))
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_SPEED_SHIFT 24
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_SPEED_MASK \
+	I40E_MASK(0x7, I40E_PRTMAC_PCS_LINK_STATUS1_LINK_SPEED_SHIFT)
 #define I40E_GL_FWRESETCNT                  0x00083100 /* Reset: POR */
 #define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
 #define I40E_GL_FWRESETCNT_FWRESETCNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
@@ -2390,10 +2395,14 @@
 #define I40E_GL_FCOERPDC_MAX_INDEX      143
 #define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
 #define I40E_GL_FCOERPDC_FCOERPDC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GL_RXERR1_L(_i)             (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR1_L_MAX_INDEX       143
-#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
-#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR1H(_i)             (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1H_MAX_INDEX       143
+#define I40E_GL_RXERR1H_RXERR1H_SHIFT   0
+#define I40E_GL_RXERR1H_RXERR1H_MASK    I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT)
+#define I40E_GL_RXERR1L(_i)             (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1L_MAX_INDEX       143
+#define I40E_GL_RXERR1L_RXERR1L_SHIFT   0
+#define I40E_GL_RXERR1L_RXERR1L_MASK    I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT)
 #define I40E_GL_RXERR2_L(_i)             (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_RXERR2_L_MAX_INDEX       143
 #define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
@@ -3620,27 +3629,6 @@
 #define I40E_GLHMC_PETIMEROBJSZ                      0x000C2080 /* Reset: CORER */
 #define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
 #define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFBASE(_i)               (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFBASE_MAX_INDEX         15
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_PEXFCNT(_i)              (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFCNT_MAX_INDEX        15
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLBASE(_i)                 (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX           15
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_PEXFFLMAX                   0x000C204c /* Reset: CORER */
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK  I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
-#define I40E_GLHMC_PEXFMAX                 0x000C2048 /* Reset: CORER */
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK  I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ                   0x000C2044 /* Reset: CORER */
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
 #define I40E_GLHMC_PFPESDPART(_i)            (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_PFPESDPART_MAX_INDEX      15
 #define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
@@ -3761,18 +3749,6 @@
 #define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX           31
 #define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
 #define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFBASE(_i)               (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX         31
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFCNT(_i)              (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX        31
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFFLBASE(_i)                 (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX           31
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
 #define I40E_GLHMC_VFSDPART(_i)            (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
 #define I40E_GLHMC_VFSDPART_MAX_INDEX      31
 #define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
@@ -3873,6 +3849,11 @@
 #define I40E_PRTMAC_LINK_DOWN_COUNTER                         0x001E2440 /* Reset: GLOBR */
 #define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
 #define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK  I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+/* _i=0...3 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_LINKSTA(_i) (0x001E2420 + ((_i) * 4))
+#define I40E_PRTMAC_LINKSTA_MAC_LINK_SPEED_SHIFT 27
+#define I40E_PRTMAC_LINKSTA_MAC_LINK_SPEED_MASK \
+	I40E_MASK(0x7, I40E_PRTMAC_LINKSTA_MAC_LINK_SPEED_SHIFT)
 #define I40E_GLNVM_AL_REQ                        0x000B6164 /* Reset: POR */
 #define I40E_GLNVM_AL_REQ_POR_SHIFT              0
 #define I40E_GLNVM_AL_REQ_POR_MASK               I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
diff --git a/drivers/net/i40e/base/i40e_status.h b/drivers/net/i40e/base/i40e_status.h
index cd72169f14..89b05ede3e 100644
--- a/drivers/net/i40e/base/i40e_status.h
+++ b/drivers/net/i40e/base/i40e_status.h
@@ -19,7 +19,7 @@ enum i40e_status_code {
 	I40E_ERR_ADAPTER_STOPPED		= -9,
 	I40E_ERR_INVALID_MAC_ADDR		= -10,
 	I40E_ERR_DEVICE_NOT_SUPPORTED		= -11,
-	I40E_ERR_MASTER_REQUESTS_PENDING	= -12,
+	I40E_ERR_PRIMARY_REQUESTS_PENDING	= -12,
 	I40E_ERR_INVALID_LINK_SETTINGS		= -13,
 	I40E_ERR_AUTONEG_NOT_COMPLETE		= -14,
 	I40E_ERR_RESET_FAILED			= -15,
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index 4674715ed7..3cfb0ca430 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -236,6 +236,14 @@ enum i40e_queue_type {
 	I40E_QUEUE_TYPE_UNKNOWN
 };
 
+enum i40e_prt_mac_link_speed {
+	I40E_PRT_MAC_LINK_SPEED_100MB = 0,
+	I40E_PRT_MAC_LINK_SPEED_1GB,
+	I40E_PRT_MAC_LINK_SPEED_10GB,
+	I40E_PRT_MAC_LINK_SPEED_40GB,
+	I40E_PRT_MAC_LINK_SPEED_20GB
+};
+
 struct i40e_link_status {
 	enum i40e_aq_phy_type phy_type;
 	enum i40e_aq_link_speed link_speed;
@@ -809,7 +817,7 @@ union i40e_32byte_rx_desc {
 		__le64  rsvd2;
 	} read;
 	struct {
-		struct {
+		struct i40e_32b_rx_wb_qw0 {
 			struct {
 				union {
 					__le16 mirroring_status;
@@ -847,6 +855,9 @@ union i40e_32byte_rx_desc {
 			} hi_dword;
 		} qword3;
 	} wb;  /* writeback */
+	struct {
+		u64 qword[4];
+	} raw;
 };
 
 #define I40E_RXD_QW0_MIRROR_STATUS_SHIFT	8
@@ -1417,6 +1428,7 @@ struct i40e_eth_stats {
 	u64 tx_broadcast;		/* bptc */
 	u64 tx_discards;		/* tdpc */
 	u64 tx_errors;			/* tepc */
+	u64 rx_discards_other;          /* rxerr1 */
 };
 
 /* Statistics collected per VEB per TC */
@@ -1551,6 +1563,9 @@ struct i40e_hw_port_stats {
 #define I40E_SR_FEATURE_CONFIGURATION_PTR	0x49
 #define I40E_SR_CONFIGURATION_METADATA_PTR	0x4D
 #define I40E_SR_IMMEDIATE_VALUES_PTR		0x4E
+#define I40E_SR_PRESERVATION_RULES_PTR		0x70
+#define I40E_X722_SR_5TH_FREE_PROVISION_AREA_PTR	0x71
+#define I40E_SR_6TH_FREE_PROVISION_AREA_PTR	0x71
 
 /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
 #define I40E_SR_VPD_MODULE_MAX_SIZE		1024
@@ -1908,6 +1923,10 @@ struct i40e_lldp_variables {
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_512	0x00010000
 
 /* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_X722_L3_SRC_SHIFT		49
+#define I40E_X722_L3_SRC_MASK		(0x3ULL << I40E_X722_L3_SRC_SHIFT)
+#define I40E_X722_L3_DST_SHIFT		41
+#define I40E_X722_L3_DST_MASK		(0x3ULL << I40E_X722_L3_DST_SHIFT)
 #define I40E_L3_SRC_SHIFT		47
 #define I40E_L3_SRC_MASK		(0x3ULL << I40E_L3_SRC_SHIFT)
 #define I40E_L3_V6_SRC_SHIFT		43
@@ -1974,6 +1993,10 @@ struct i40e_metadata_segment {
 	struct i40e_ddp_version version;
 #define I40E_DDP_TRACKID_RDONLY		0
 #define I40E_DDP_TRACKID_INVALID	0xFFFFFFFF
+#define I40E_DDP_TRACKID_GRP_MSK	0x00FF0000
+#define I40E_DDP_TRACKID_GRP_COMP_ALL	0xFF
+#define I40E_DDP_TRACKID_PKGTYPE_MSK	0xFF000000
+#define I40E_DDP_TRACKID_PKGTYPE_RDONLY	0
 	u32 track_id;
 	char name[I40E_DDP_NAME_SIZE];
 };
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index ba4a4a9ddc..5a1f5fcf38 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -397,6 +397,7 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
 				      struct rte_ether_addr *mac_addr);
 
 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
 
 static int i40e_ethertype_filter_convert(
 	const struct rte_eth_ethertype_filter *input,
@@ -440,7 +441,10 @@ static const struct rte_pci_id pci_id_i40e_map[] = {
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
+#ifdef X722_A0_SUPPORT
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
+#endif
+	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722_A) },
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
@@ -1775,11 +1779,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	 */
 	i40e_add_tx_flow_control_drop_filter(pf);
 
-	/* Set the max frame size to 0x2600 by default,
-	 * in case other drivers changed the default value.
-	 */
-	i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
-
 	/* initialize mirror rule list */
 	TAILQ_INIT(&pf->mirror_list);
 
@@ -2434,6 +2433,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	uint32_t intr_vector = 0;
 	struct i40e_vsi *vsi;
 	uint16_t nb_rxq, nb_txq;
+	uint16_t max_frame_size;
 
 	hw->adapter_stopped = 0;
 
@@ -2575,6 +2575,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
 			    "please call hierarchy_commit() "
 			    "before starting the port");
 
+	max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
+	i40e_set_mac_max_frame(dev, max_frame_size);
+
 	return I40E_SUCCESS;
 
 tx_err:
@@ -2942,11 +2945,13 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
 	return i40e_phy_conf_link(hw, abilities, speed, false);
 }
 
+#define CHECK_INTERVAL             100  /* 100ms */
+#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
+
 static __rte_always_inline void
 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 {
 /* Link status registers and values*/
-#define I40E_PRTMAC_LINKSTA		0x001E2420
 #define I40E_REG_LINK_UP		0x40000080
 #define I40E_PRTMAC_MACC		0x001E24E0
 #define I40E_REG_MACC_25GB		0x00020000
@@ -2959,7 +2964,7 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 	uint32_t link_speed;
 	uint32_t reg_val;
 
-	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
+	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA(0));
 	link_speed = reg_val & I40E_REG_SPEED_MASK;
 	reg_val &= I40E_REG_LINK_UP;
 	link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
@@ -3009,8 +3014,6 @@ static __rte_always_inline void
 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 	bool enable_lse, int wait_to_complete)
 {
-#define CHECK_INTERVAL             100  /* 100ms */
-#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
 	uint32_t rep_cnt = MAX_REPEAT_TIME;
 	struct i40e_link_status link_status;
 	int status;
@@ -3297,7 +3300,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
 			    &os->eth.rx_unknown_protocol,
 			    &ns->eth.rx_unknown_protocol);
 	i40e_stat_update_48(hw, I40E_GL_RXERR1_H(hw->pf_id + I40E_MAX_VF),
-			    I40E_GL_RXERR1_L(hw->pf_id + I40E_MAX_VF),
+			    I40E_GL_RXERR1L(hw->pf_id + I40E_MAX_VF),
 			    pf->offset_loaded, &pf->rx_err1_offset,
 			    &pf->rx_err1);
 	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
@@ -6813,6 +6816,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 			if (!ret)
 				rte_eth_dev_callback_process(dev,
 					RTE_ETH_EVENT_INTR_LSC, NULL);
+
 			break;
 		default:
 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
@@ -7104,6 +7108,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
 	int ret = I40E_SUCCESS;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
 	struct i40e_aqc_remove_macvlan_element_data *req_list;
+	enum i40e_admin_queue_err aq_status;
 
 	if (filter == NULL  || total == 0)
 		return I40E_ERR_PARAM;
@@ -7151,11 +7156,19 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
 			req_list[i].flags = rte_cpu_to_le_16(flags);
 		}
 
-		ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
-						actual_num, NULL);
+		ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, req_list,
+						actual_num, NULL, &aq_status);
+
 		if (ret != I40E_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
-			goto DONE;
+			/* Do not report as an error
+			 * when firmware returns ENOENT
+			 */
+			if (aq_status == I40E_AQ_RC_ENOENT) {
+				ret = I40E_SUCCESS;
+			} else {
+				PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
+				goto DONE;
+			}
 		}
 		num += actual_num;
 	} while (num < total);
@@ -12585,6 +12598,31 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	return ret;
 }
 
+static void
+i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t rep_cnt = MAX_REPEAT_TIME;
+	struct rte_eth_link link;
+	enum i40e_status_code status;
+
+	do {
+		update_link_reg(hw, &link);
+		if (link.link_status)
+			break;
+
+		rte_delay_ms(CHECK_INTERVAL);
+	} while (--rep_cnt);
+
+	if (link.link_status) {
+		status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
+		if (status != I40E_SUCCESS)
+			PMD_DRV_LOG(ERR, "Failed to set max frame size at port level");
+	} else {
+		PMD_DRV_LOG(ERR, "Set max frame size at port level not applicable on link down");
+	}
+}
+
 int
 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
 		   const struct rte_flow_action_rss *in)
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index b56f9f9149..e6adbb425e 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1153,10 +1153,6 @@ i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
 }
 
 static const struct rte_pci_id pci_id_i40evf_map[] = {
-	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
-	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
-	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
-	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
@@ -1236,7 +1232,7 @@ i40evf_reset_vf(struct rte_eth_dev *dev)
 	  * it to ACTIVE. In this duration, vf may not catch the moment that
 	  * COMPLETE is set. So, for vf, we'll try to wait a long time.
 	  */
-	rte_delay_ms(200);
+	rte_delay_ms(500);
 
 	ret = i40evf_check_vf_reset_done(dev);
 	if (ret) {
diff --git a/drivers/net/i40e/i40e_regs.h b/drivers/net/i40e/i40e_regs.h
index b19bb1d5a5..cf62c9dfb7 100644
--- a/drivers/net/i40e/i40e_regs.h
+++ b/drivers/net/i40e/i40e_regs.h
@@ -586,9 +586,6 @@ static const struct i40e_reg_info i40e_regs_others[] = {
 	{I40E_GLHMC_PEARPMAX, 0, 0, 0, 0, "GLHMC_PEARPMAX"},
 	{I40E_GLHMC_PEMROBJSZ, 0, 0, 0, 0, "GLHMC_PEMROBJSZ"},
 	{I40E_GLHMC_PEMRMAX, 0, 0, 0, 0, "GLHMC_PEMRMAX"},
-	{I40E_GLHMC_PEXFOBJSZ, 0, 0, 0, 0, "GLHMC_PEXFOBJSZ"},
-	{I40E_GLHMC_PEXFMAX, 0, 0, 0, 0, "GLHMC_PEXFMAX"},
-	{I40E_GLHMC_PEXFFLMAX, 0, 0, 0, 0, "GLHMC_PEXFFLMAX"},
 	{I40E_GLHMC_PEQ1OBJSZ, 0, 0, 0, 0, "GLHMC_PEQ1OBJSZ"},
 	{I40E_GLHMC_PEQ1MAX, 0, 0, 0, 0, "GLHMC_PEQ1MAX"},
 	{I40E_GLHMC_PEQ1FLMAX, 0, 0, 0, 0, "GLHMC_PEQ1FLMAX"},
@@ -616,9 +613,6 @@ static const struct i40e_reg_info i40e_regs_others[] = {
 	{I40E_GLHMC_APBVTINUSEBASE(0), 15, 4, 0, 0, "GLHMC_APBVTINUSEBASE"},
 	{I40E_GLHMC_PEMRBASE(0), 15, 4, 0, 0, "GLHMC_PEMRBASE"},
 	{I40E_GLHMC_PEMRCNT(0), 15, 4, 0, 0, "GLHMC_PEMRCNT"},
-	{I40E_GLHMC_PEXFBASE(0), 15, 4, 0, 0, "GLHMC_PEXFBASE"},
-	{I40E_GLHMC_PEXFCNT(0), 15, 4, 0, 0, "GLHMC_PEXFCNT"},
-	{I40E_GLHMC_PEXFFLBASE(0), 15, 4, 0, 0, "GLHMC_PEXFFLBASE"},
 	{I40E_GLHMC_PEQ1BASE(0), 15, 4, 0, 0, "GLHMC_PEQ1BASE"},
 	{I40E_GLHMC_PEQ1CNT(0), 15, 4, 0, 0, "GLHMC_PEQ1CNT"},
 	{I40E_GLHMC_PEQ1FLBASE(0), 15, 4, 0, 0, "GLHMC_PEQ1FLBASE"},
@@ -653,9 +647,6 @@ static const struct i40e_reg_info i40e_regs_others[] = {
 	{I40E_GLHMC_VFAPBVTINUSEBASE(0), 31, 4, 0, 0, "GLHMC_VFAPBVTINUSEBASE"},
 	{I40E_GLHMC_VFPEMRBASE(0), 31, 4, 0, 0, "GLHMC_VFPEMRBASE"},
 	{I40E_GLHMC_VFPEMRCNT(0), 31, 4, 0, 0, "GLHMC_VFPEMRCNT"},
-	{I40E_GLHMC_VFPEXFBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFBASE"},
-	{I40E_GLHMC_VFPEXFCNT(0), 31, 4, 0, 0, "GLHMC_VFPEXFCNT"},
-	{I40E_GLHMC_VFPEXFFLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFFLBASE"},
 	{I40E_GLHMC_VFPEQ1BASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1BASE"},
 	{I40E_GLHMC_VFPEQ1CNT(0), 31, 4, 0, 0, "GLHMC_VFPEQ1CNT"},
 	{I40E_GLHMC_VFPEQ1FLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1FLBASE"},
@@ -896,7 +887,7 @@ static const struct i40e_reg_info i40e_regs_others[] = {
 	{I40E_GL_FCOEDDPC(0), 143, 8, 0, 0, "GL_FCOEDDPC"},
 	{I40E_GL_FCOECRC(0), 143, 8, 0, 0, "GL_FCOECRC"},
 	{I40E_GL_FCOEPRC(0), 143, 8, 0, 0, "GL_FCOEPRC"},
-	{I40E_GL_RXERR1_L(0), 143, 8, 0, 0, "GL_RXERR1_L"},
+	{I40E_GL_RXERR1L(0), 143, 8, 0, 0, "GL_RXERR1_L"},
 	{I40E_GL_FCOEDIFEC(0), 143, 8, 0, 0, "GL_FCOEDIFEC"},
 	{I40E_GL_RXERR2_L(0), 143, 8, 0, 0, "GL_RXERR2_L"},
 	{I40E_GL_FCOEDWRCL(0), 143, 8, 0, 0, "GL_FCOEDWRCL"},
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index b26bd0640d..91ccc345f0 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -449,7 +449,11 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
 					I40E_RXD_QW1_STATUS_SHIFT;
 		}
 
-		rte_smp_rmb();
+		/**
+		 * This barrier is to order loads of different words
+		 *  in the descriptor.
+		 */
+		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
 
 		/* Compute how many status bits were set */
 		for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 168e4fef02..01cfc44a66 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -127,6 +127,10 @@ static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 
 static const struct rte_pci_id pci_id_iavf_map[] = {
 	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
@@ -1288,6 +1292,9 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	ret = iavf_query_stats(adapter, &pstats);
 	if (ret == 0) {
+		uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
+					 DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+					 RTE_ETHER_CRC_LEN;
 		iavf_update_stats(vsi, pstats);
 		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
 				pstats->rx_broadcast - pstats->rx_discards;
@@ -1296,7 +1303,7 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		stats->imissed = pstats->rx_discards;
 		stats->oerrors = pstats->tx_errors + pstats->tx_discards;
 		stats->ibytes = pstats->rx_bytes;
-		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
+		stats->ibytes -= stats->ipackets * crc_stats_len;
 		stats->obytes = pstats->tx_bytes;
 	} else {
 		PMD_DRV_LOG(ERR, "Get statistics failed");
@@ -1864,6 +1871,9 @@ iavf_init_vf(struct rte_eth_dev *dev)
 		}
 	}
 
+	if (vf->vsi_res->num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT)
+		vf->lv_enabled = true;
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
 		if (iavf_get_supported_rxdid(adapter) != 0) {
 			PMD_INIT_LOG(ERR, "failed to do get supported rxdid");
@@ -1888,6 +1898,27 @@ iavf_init_vf(struct rte_eth_dev *dev)
 	return -1;
 }
 
+static void
+iavf_uninit_vf(struct rte_eth_dev *dev)
+{
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_shutdown_adminq(hw);
+
+	rte_free(vf->vf_res);
+	vf->vsi_res = NULL;
+	vf->vf_res = NULL;
+
+	rte_free(vf->aq_resp);
+	vf->aq_resp = NULL;
+
+	rte_free(vf->rss_lut);
+	vf->rss_lut = NULL;
+	rte_free(vf->rss_key);
+	vf->rss_key = NULL;
+}
+
 /* Enable default admin queue interrupt setting */
 static inline void
 iavf_enable_irq0(struct iavf_hw *hw)
@@ -2013,7 +2044,8 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
 			     " store MAC addresses",
 			     RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto init_vf_err;
 	}
 	/* If the MAC address is not configured by host,
 	 * generate a random one.
@@ -2038,10 +2070,19 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 	ret = iavf_flow_init(adapter);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to initialize flow");
-		return ret;
+		goto flow_init_err;
 	}
 
 	return 0;
+
+flow_init_err:
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+
+init_vf_err:
+	iavf_uninit_vf(eth_dev);
+
+	return ret;
 }
 
 static int
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 555551008b..262d366461 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -439,48 +439,53 @@ iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
 #endif
 }
 
+static const
+iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[IAVF_RXDID_LAST + 1] = {
+	[IAVF_RXDID_LEGACY_0] = iavf_rxd_to_pkt_fields_by_comms_ovs,
+	[IAVF_RXDID_LEGACY_1] = iavf_rxd_to_pkt_fields_by_comms_ovs,
+	[IAVF_RXDID_COMMS_AUX_VLAN] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+	[IAVF_RXDID_COMMS_AUX_IPV4] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+	[IAVF_RXDID_COMMS_AUX_IPV6] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+	[IAVF_RXDID_COMMS_AUX_IPV6_FLOW] =
+		iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+	[IAVF_RXDID_COMMS_AUX_TCP] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+	[IAVF_RXDID_COMMS_AUX_IP_OFFSET] =
+		iavf_rxd_to_pkt_fields_by_comms_aux_v2,
+	[IAVF_RXDID_COMMS_OVS_1] = iavf_rxd_to_pkt_fields_by_comms_ovs,
+};
+
 static void
 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 {
+	rxq->rxdid = rxdid;
+
 	switch (rxdid) {
 	case IAVF_RXDID_COMMS_AUX_VLAN:
 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 	case IAVF_RXDID_COMMS_AUX_IPV4:
 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 	case IAVF_RXDID_COMMS_AUX_IPV6:
 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 	case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
 		rxq->xtr_ol_flag =
 			rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 	case IAVF_RXDID_COMMS_AUX_TCP:
 		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 	case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
 		rxq->xtr_ol_flag =
 			rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
-		rxq->rxd_to_pkt_fields =
-			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
 	case IAVF_RXDID_COMMS_OVS_1:
-		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
+	case IAVF_RXDID_LEGACY_0:
+	case IAVF_RXDID_LEGACY_1:
 		break;
 	default:
-		/* update this according to the RXDID for FLEX_DESC_NONE */
-		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
+		rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
 		break;
 	}
 
@@ -506,9 +511,12 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	uint8_t proto_xtr;
 	uint16_t len;
 	uint16_t rx_free_thresh;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
 	    nb_desc > IAVF_MAX_RING_DESC ||
 	    nb_desc < IAVF_MIN_RING_DESC) {
@@ -562,6 +570,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 	rxq->rx_hdr_len = 0;
 	rxq->vsi = vsi;
+	rxq->offloads = offloads;
 
 	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 	rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
@@ -1105,7 +1114,7 @@ iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
 			   rxq->port_id, rxq->queue_id, rx_id, nb_hold);
 		rx_id = (uint16_t)((rx_id == 0) ?
 			(rxq->nb_rx_desc - 1) : (rx_id - 1));
-		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+		IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
 		nb_hold = 0;
 	}
 	rxq->nb_rx_hold = nb_hold;
@@ -1304,7 +1313,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
-		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
 
@@ -1446,7 +1455,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
-		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
+		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
 		first_seg->ol_flags |= pkt_flags;
@@ -1637,7 +1646,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 	struct rte_mbuf *mb;
 	uint16_t stat_err0;
 	uint16_t pkt_len;
-	int32_t s[IAVF_LOOK_AHEAD], nb_dd;
+	int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
 	int32_t i, j, nb_rx = 0;
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
@@ -1662,9 +1671,27 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 		rte_smp_rmb();
 
-		/* Compute how many status bits were set */
-		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
-			nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
+		/* Compute how many contiguous DD bits were set */
+		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
+			var = s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
+#ifdef RTE_ARCH_ARM
+			/* For Arm platforms, count only contiguous descriptors
+			 * whose DD bit is set to 1. On Arm platforms, reads of
+			 * descriptors can be reordered. Since the CPU may
+			 * be reading the descriptors as the NIC updates them
+			 * in memory, it is possible that the DD bit for a
+			 * descriptor earlier in the queue is read as not set
+			 * while the DD bit for a descriptor later in the queue
+			 * is read as set.
+			 */
+			if (var)
+				nb_dd += 1;
+			else
+				break;
+#else
+			nb_dd += var;
+#endif
+		}
 
 		nb_rx += nb_dd;
 
@@ -1684,7 +1711,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
-			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+			rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
 
@@ -1714,7 +1741,7 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
 	uint16_t pkt_len;
 	uint64_t qword1;
 	uint32_t rx_status;
-	int32_t s[IAVF_LOOK_AHEAD], nb_dd;
+	int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
 	int32_t i, j, nb_rx = 0;
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
@@ -1745,9 +1772,27 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
 
 		rte_smp_rmb();
 
-		/* Compute how many status bits were set */
-		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
-			nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
+		/* Compute how many contiguous DD bits were set */
+		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
+			var = s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
+#ifdef RTE_ARCH_ARM
+			/* For Arm platforms, count only contiguous descriptors
+			 * whose DD bit is set to 1. On Arm platforms, reads of
+			 * descriptors can be reordered. Since the CPU may
+			 * be reading the descriptors as the NIC updates them
+			 * in memory, it is possible that the DD bit for a
+			 * descriptor earlier in the queue is read as not set
+			 * while the DD bit for a descriptor later in the queue
+			 * is read as set.
+			 */
+			if (var)
+				nb_dd += 1;
+			else
+				break;
+#else
+			nb_dd += var;
+#endif
+		}
 
 		nb_rx += nb_dd;
 
@@ -1854,7 +1899,7 @@ iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
 
 	/* Update rx tail register */
 	rte_wmb();
-	IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+	IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
 
 	rxq->rx_free_trigger =
 		(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
@@ -2267,7 +2312,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
 		   txq->port_id, txq->queue_id, tx_id, nb_tx);
 
-	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
 	txq->tx_tail = tx_id;
 
 	return nb_tx;
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index decfe3ad4c..2d48b65922 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -192,9 +192,8 @@ struct iavf_rx_queue {
 	const struct iavf_rxq_ops *ops;
 	uint8_t proto_xtr; /* protocol extraction type */
 	uint64_t xtr_ol_flag;
-		/* flexible descriptor metadata extraction offload flag */
-	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
-				/* handle flexible descriptor by RXDID */
+	/* flexible descriptor metadata extraction offload flag */
+	uint64_t offloads;
 };
 
 struct iavf_tx_entry {
@@ -349,41 +348,6 @@ enum iavf_rxdid {
 	IAVF_RXDID_LAST			= 63,
 };
 
-enum iavf_rx_flex_desc_status_error_0_bits {
-	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,
-	IAVF_RX_FLEX_DESC_STATUS0_EOF_S,
-	IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
-	IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
-	IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
-	IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
-	IAVF_RX_FLEX_DESC_STATUS0_RXE_S,
-	IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
-	IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
-	IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
-	IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
-	IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
-};
-
-enum iavf_rx_flex_desc_status_error_1_bits {
-	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
-	/* [10:6] reserved */
-	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
-	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
-	IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
-	IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
-	IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
-	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
-};
-
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index a006d90a24..c20e9ccaa9 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -1399,7 +1399,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	txq->tx_tail = tx_id;
 
-	IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	IAVF_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
 
 	return nb_pkts;
 }
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index f61681474c..5a84c42cf9 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -140,7 +140,7 @@ iavf_rxq_rearm(struct iavf_rx_queue *rxq)
 			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
 
 	/* Update the tail pointer on the NIC */
-	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+	IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
 }
 
 #define IAVF_RX_LEN_MASK 0x80808080
@@ -1654,7 +1654,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	txq->tx_tail = tx_id;
 
-	IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	IAVF_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
 
 	return nb_pkts;
 }
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 7629474508..3dfd921df9 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -475,7 +475,7 @@ iavf_rxq_rearm_common(struct iavf_rx_queue *rxq, __rte_unused bool avx512)
 			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
 
 	/* Update the tail pointer on the NIC */
-	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+	IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
 }
 #endif
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 3298d71317..572078c7cd 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -88,7 +88,7 @@ iavf_rxq_rearm(struct iavf_rx_queue *rxq)
 		   rx_id, rxq->rxrearm_start, rxq->rxrearm_nb);
 
 	/* Update the tail pointer on the NIC */
-	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+	IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
 }
 
 static inline void
@@ -1172,7 +1172,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_pkts=%u",
 		   txq->port_id, txq->queue_id, tx_id, nb_pkts);
 
-	IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	IAVF_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
 
 	return nb_pkts;
 }
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 1460330572..156ccd21e4 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -157,7 +157,7 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (_atomic_set_cmd(vf, args->ops))
 		return -1;
 
-	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
+	ret = iavf_aq_send_msg_to_pf(hw, args->ops, VIRTCHNL_STATUS_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
@@ -463,7 +463,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
 		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
 		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
-		VIRTCHNL_VF_LARGE_NUM_QPAIRS;
+		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index e371b3dc67..db24c554f7 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -40,7 +40,7 @@ static __rte_always_inline int
 ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
 			    uint8_t *req_msg, uint16_t req_msglen)
 {
-	return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS,
+	return iavf_aq_send_msg_to_pf(&hw->avf, op, VIRTCHNL_STATUS_SUCCESS,
 				      req_msg, req_msglen, NULL);
 }
 
@@ -105,7 +105,7 @@ static __rte_always_inline int
 ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
 {
 	return iavf_aq_send_msg_to_pf(&hw->avf,
-				      cmd->v_op, IAVF_SUCCESS,
+				      cmd->v_op, VIRTCHNL_STATUS_SUCCESS,
 				      cmd->req_msg, cmd->req_msglen, NULL);
 }
 
-- 
2.27.0


^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2022-04-07  6:19 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-03-28  8:04 [RFC] net/i40e: backport i40e fixes and share code to 20.11.4 Steve Yang
2022-04-07  6:11 ` [RFC v2] " Steve Yang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).